From noreply at buildbot.pypy.org Sat Mar 1 02:14:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 02:14:15 +0100 (CET) Subject: [pypy-commit] pypy test-58c3d8552833: close branch to be merged Message-ID: <20140301011415.3DB351C244E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: test-58c3d8552833 Changeset: r69566:038dc0dc8ded Date: 2014-02-28 20:09 -0500 http://bitbucket.org/pypy/pypy/changeset/038dc0dc8ded/ Log: close branch to be merged From noreply at buildbot.pypy.org Sat Mar 1 02:14:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 02:14:16 +0100 (CET) Subject: [pypy-commit] pypy default: merge getarrayitem_gc_pure opt fix Message-ID: <20140301011416.A34611C244E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69567:8dfe1848a8da Date: 2014-02-28 20:10 -0500 http://bitbucket.org/pypy/pypy/changeset/8dfe1848a8da/ Log: merge getarrayitem_gc_pure opt fix diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -194,7 +194,6 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -628,13 +628,6 @@ def optimize_DEBUG_MERGE_POINT(self, op): self.emit_operation(op) - def optimize_GETARRAYITEM_GC_PURE(self, op): - indexvalue = self.getvalue(op.getarg(1)) - if indexvalue.is_constant(): - arrayvalue = self.getvalue(op.getarg(0)) - arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) - self.optimize_default(op) - def optimize_STRGETITEM(self, op): indexvalue = self.getvalue(op.getarg(1)) if indexvalue.is_constant(): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -110,11 +110,6 @@ def produce_potential_short_preamble_ops(self, sb): for op in self.emitted_pure_operations: - if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ - op.getopnum() == rop.STRGETITEM or \ - op.getopnum() == rop.UNICODEGETITEM: - if not self.getvalue(op.getarg(1)).is_constant(): - continue sb.add_potential(op) dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6128,13 +6128,12 @@ i5 = int_add(i1, i3) i4 = strgetitem(p1, i5) escape(i4) - jump(p1, i1, i2, i3, i5) - """ - expected = """ - [p1, i1, i2, i3, i5] - i4 = strgetitem(p1, i5) + jump(p1, i1, i2, i3, i4) + """ + expected = """ + [p1, i1, i2, i3, i4] escape(i4) - jump(p1, i1, i2, i3, i5) + jump(p1, i1, i2, i3, i4) """ self.optimize_strunicode_loop(ops, expected, preamble) @@ -6195,7 +6194,6 @@ """ expected = """ [p0, i0] - i1 = strgetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -6211,7 +6209,6 @@ """ expected = """ [p0, i0] - i1 = unicodegetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -7183,7 +7180,12 
@@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_unicodelen(self): ops = """ @@ -7206,7 +7208,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_arraylen(self): ops = """ @@ -7332,7 +7339,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_constant_getarrayitem_pure(self): ops = """ From noreply at buildbot.pypy.org Sat Mar 1 02:14:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 02:14:17 +0100 (CET) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140301011417.C93F11C244E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69568:832578ce87ea Date: 2014-02-28 20:13 -0500 http://bitbucket.org/pypy/pypy/changeset/832578ce87ea/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -88,3 +88,9 @@ .. branch: numpy-refactor Cleanup micronumpy module + +.. branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. 
branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization From noreply at buildbot.pypy.org Sat Mar 1 09:18:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 09:18:02 +0100 (CET) Subject: [pypy-commit] pypy default: adjust array iter Message-ID: <20140301081802.83F131C1041@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69569:03465c3e4130 Date: 2014-03-01 00:18 -0500 http://bitbucket.org/pypy/pypy/changeset/03465c3e4130/ Log: adjust array iter diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -80,7 +80,7 @@ class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', - 'strides[*]', 'backstrides[*]', 'indices'] + 'strides[*]', 'backstrides[*]'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) @@ -90,17 +90,13 @@ self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides - self.indices = [0] * len(shape) self.reset() - @jit.unroll_safe def reset(self): self.index = 0 - for i in xrange(self.ndim_m1, -1, -1): - self.indices[i] = 0 + self.indices = [0] * len(self.shape_m1) self.offset = self.array.start - @jit.unroll_safe def next(self): self.index += 1 for i in xrange(self.ndim_m1, -1, -1): @@ -112,7 +108,6 @@ self.indices[i] = 0 self.offset -= self.backstrides[i] - @jit.unroll_safe def next_skip_x(self, step): assert step >= 0 if step == 0: diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -167,7 +167,6 @@ # check that we got only one loop assert len(get_stats().loops) == 1 - def define_prod(): return """ a = |30| @@ -429,6 +428,7 @@ 'raw_store': 1, 'int_add': 1, 'int_ge': 1, 'guard_false': 1, 'jump': 1, 'arraylen_gc': 1}) + def define_flat_iter(): return ''' a = |30| @@ -517,29 +517,29 @@ 'int_lt': 1, 'jump': 1, 'raw_load': 2}) - self.check_resops({'float_add': 2, + self.check_resops({'arraylen_gc': 1, + 'call': 3, + 'float_add': 2, 'float_mul': 2, - 'getarrayitem_gc': 11, - 'getarrayitem_gc_pure': 15, - 'getfield_gc': 30, - 'getfield_gc_pure': 44, + 'getfield_gc': 26, + 'getfield_gc_pure': 24, 'guard_class': 4, - 'guard_false': 14, + 'guard_false': 2, + 'guard_no_exception': 3, 'guard_nonnull': 8, 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, - 'guard_true': 13, + 'guard_true': 9, 'guard_value': 4, - 'int_add': 25, - 'int_ge': 4, - 'int_le': 8, - 'int_lt': 11, - 'int_sub': 4, + 'int_add': 6, + 'int_force_ge_zero': 1, + 'int_ge': 3, + 'int_lt': 4, 'jump': 3, + 'new_array': 1, 'raw_load': 6, 'raw_store': 1, - 'setarrayitem_gc': 10, - 'setfield_gc': 14}) + 'setfield_gc': 3}) def define_argsort(): return """ From noreply at buildbot.pypy.org Sat Mar 1 09:18:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 09:18:03 +0100 (CET) Subject: [pypy-commit] pypy default: use intbounds to optimize int_force_ge_zero Message-ID: <20140301081803.B35451C1041@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69570:56c6402c9b80 Date: 2014-03-01 02:39 -0500 http://bitbucket.org/pypy/pypy/changeset/56c6402c9b80/ Log: use intbounds to optimize int_force_ge_zero diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ 
b/pypy/module/micronumpy/test/test_zjit.py @@ -532,7 +532,6 @@ 'guard_true': 9, 'guard_value': 4, 'int_add': 6, - 'int_force_ge_zero': 1, 'int_ge': 3, 'int_lt': 4, 'jump': 3, diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -329,6 +329,13 @@ else: self.emit_operation(op) + def optimize_INT_FORCE_GE_ZERO(self, op): + value = self.getvalue(op.getarg(0)) + if value.intbound.known_ge(IntBound(0, 0)): + self.make_equal_to(op.result, value) + else: + self.emit_operation(op) + def optimize_ARRAYLEN_GC(self, op): self.emit_operation(op) array = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -654,6 +654,3 @@ dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', default=Optimizer.optimize_default) - - - diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4392,6 +4392,27 @@ """ self.optimize_strunicode_loop(ops, expected, preamble) + def test_bound_force_ge_zero(self): + ops = """ + [p0] + i0 = arraylen_gc(p0) + i1 = int_force_ge_zero(i0) + escape(i1) + jump(p0) + """ + preamble = """ + [p0] + i0 = arraylen_gc(p0) + escape(i0) + jump(p0, i0) + """ + expected = """ + [p0, i0] + escape(i0) + jump(p0, i0) + """ + self.optimize_loop(ops, expected, preamble) + def test_addsub_const(self): ops = """ [i0] From noreply at buildbot.pypy.org Sat Mar 1 10:33:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 10:33:37 +0100 (CET) Subject: [pypy-commit] stmgc default: Reintroduce mulitple condition variables, but this time in a more Message-ID: <20140301093337.74C9C1C3427@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r905:6fe6a5f23d8b Date: 2014-03-01 10:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/6fe6a5f23d8b/ Log: Reintroduce mulitple condition variables, but this time in a more controlled fashion. In theory, let's say the code becomes clearer to follow and it's easier to check its correctness. We'll see in practice (done refactoring, some bugs left). diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -19,40 +19,32 @@ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* I'm inevitable, so the other is not. */ assert(other_pseg->transaction_state != TS_INEVITABLE); - other_pseg->transaction_state = TS_MUST_ABORT; + other_pseg->pub.nursery_end = NSE_SIGABORT; } - else if (other_pseg->start_time < STM_PSEGMENT->start_time) { + else if (other_pseg->start_time <= STM_PSEGMENT->start_time) { /* The other thread started before us, so I should abort, as I'm the least long-running transaction. */ } else if (other_pseg->transaction_state == TS_REGULAR) { /* The other thread started strictly after us. We tell it to abort if we can (e.g. if it's not TS_INEVITABLE). */ - other_pseg->transaction_state = TS_MUST_ABORT; + other_pseg->pub.nursery_end = NSE_SIGABORT; } - if (other_pseg->transaction_state != TS_MUST_ABORT) { - /* if the other thread is not in aborting-soon mode, then we must - abort. 
*/ + /* Now check what we just did... almost: the check at the following + line can also find a NSE_SIGABORT that was set earlier. + */ + if (other_pseg->pub.nursery_end != NSE_SIGABORT) { + /* if the other thread is not in aborting-soon mode, then *we* + must abort. */ abort_with_mutex(); } - else { - /* signal the other thread; it must abort. - - Note that we know that the target thread is running now, and - so it is or will soon be blocked at a mutex_lock() or a - cond_wait(C_SAFE_POINT). Thus broadcasting C_SAFE_POINT is - enough to wake it up in the second case. - */ - cond_broadcast(); - } } static void write_write_contention_management(uintptr_t lock_idx) { - mutex_lock(); - - if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) + s_mutex_lock(); + if (must_abort()) abort_with_mutex(); uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; @@ -61,32 +53,60 @@ uint8_t other_segment_num = prev_owner - 1; contention_management(other_segment_num); - /* the rest of this code is for the case where we continue to - run, and the other thread is asked to abort */ + /* The rest of this code is for the case where we continue to + run. We have to signal the other thread to abort, and wait + until it does. */ + + int sp = get_priv_segment(other_segment_num)->safe_point; + switch (sp) { + + case SP_RUNNING: + /* The other thread is running now, so if we set + NSE_SIGABORT in 'nursery_end', it will soon enter a + mutex_lock() and thus abort. Note that this line can + overwrite a NSE_SIGPAUSE, which is fine. + */ + get_segment(other_segment_num)->nursery_end = NSE_SIGABORT; + break; + + /* The other cases are where the other thread is at a + safe-point. We wake it up by sending the correct signal. + */ + case SP_WAIT_FOR_C_REQUEST_REMOVED: + cond_broadcast(C_REQUEST_REMOVED); + break; + + case SP_WAIT_FOR_C_AT_SAFE_POINT: + cond_broadcast(C_AT_SAFE_POINT); + break; #ifdef STM_TESTS - /* abort anyway for tests. We mustn't call cond_wait() */ - abort_with_mutex(); + case SP_WAIT_FOR_OTHER_THREAD: + /* abort anyway for tests. We can't wait here */ + abort_with_mutex(); #endif - /* first mark the other thread as "needing a safe-point" */ - struct stm_priv_segment_info_s* other_pseg; - other_pseg = get_priv_segment(other_segment_num); - assert(other_pseg->transaction_state == TS_MUST_ABORT); - other_pseg->pub.nursery_end = NSE_SIGNAL; + default: + stm_fatalerror("unexpected other_pseg->safe_point: %d", sp); + } /* wait, hopefully until the other thread broadcasts "I'm - done aborting" (spurious wake-ups are ok). */ - dprintf(("contention: wait C_SAFE_POINT...\n")); - cond_wait(); + done aborting" (spurious wake-ups are ok). Important: + this is not a safe point of any kind! The shadowstack + is not correct here. It should not end in a deadlock, + because the target thread is, in principle, guaranteed + to call abort_with_mutex(). + */ + dprintf(("contention: wait C_ABORTED...\n")); + cond_wait(C_ABORTED); dprintf(("contention: done\n")); - cond_broadcast(); + if (must_abort()) + abort_with_mutex(); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ - assert(STM_PSEGMENT->safe_point == SP_RUNNING); } - mutex_unlock(); + s_mutex_unlock(); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -149,7 +149,7 @@ void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { - mutex_lock_no_abort(); + s_mutex_lock(); retry: if (jmpbuf == NULL) { @@ -171,12 +171,18 @@ #endif STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; - assert(STM_SEGMENT->nursery_end == NURSERY_END); dprintf(("start_transaction\n")); - mutex_unlock(); + enter_safe_point_if_requested(); + s_mutex_unlock(); + /* Now running the SP_RUNNING start. We can set our + 'transaction_read_version' after releasing the mutex, + because it is only read by a concurrent thread in + stm_commit_transaction(), which waits until SP_RUNNING + threads are paused. + */ uint8_t old_rv = STM_SEGMENT->transaction_read_version; STM_SEGMENT->transaction_read_version = old_rv + 1; if (UNLIKELY(old_rv == 0xff)) { @@ -204,12 +210,8 @@ char *remote_base = get_segment_base(remote_num); uint8_t remote_version = get_segment(remote_num)->transaction_read_version; - switch (get_priv_segment(remote_num)->transaction_state) { - case TS_NONE: - case TS_MUST_ABORT: - return; /* no need to do any check */ - default:; - } + if (get_priv_segment(remote_num)->transaction_state == TS_NONE) + return; /* no need to check */ LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, @@ -219,10 +221,9 @@ /* A write-read conflict! */ contention_management(remote_num); - /* If we reach this point, it means we aborted the other - thread. We're done here. */ - assert(get_priv_segment(remote_num)->transaction_state == - TS_MUST_ABORT); + /* If we reach this point, it means that we would like + the other thread to abort. We're done here. */ + assert(get_segment(remote_num)->nursery_end == NSE_SIGABORT); return; } })); @@ -288,8 +289,8 @@ char *local_base = STM_SEGMENT->segment_base; char *remote_base = get_segment_base(remote_num); bool remote_active = - (get_priv_segment(remote_num)->transaction_state == TS_REGULAR || - get_priv_segment(remote_num)->transaction_state == TS_INEVITABLE); + (get_priv_segment(remote_num)->transaction_state != TS_NONE && + get_segment(remote_num)->nursery_end != NSE_SIGABORT); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, @@ -333,9 +334,6 @@ stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ - - /* wake up other threads waiting. */ - cond_broadcast(); } void stm_commit_transaction(void) @@ -349,22 +347,12 @@ minor_collection(/*commit=*/ true); - mutex_lock(); + s_mutex_lock(); - retry: - if (STM_SEGMENT->nursery_end != NURSERY_END) - collectable_safe_point(); - - STM_PSEGMENT->safe_point = SP_SAFE_POINT; - - /* wait until the other thread is at a safe-point */ - if (!try_wait_for_other_safe_points()) { - STM_PSEGMENT->safe_point = SP_RUNNING; - goto retry; - } - - /* the rest of this function either runs atomically without - releasing the mutex, or aborts the current thread. */ + /* force all other threads to be paused. They will unpause + automatically when we are done here, i.e. at mutex_unlock(). + Important: we should not call cond_wait() in the meantime. 
*/ + synchronize_all_threads(); /* detect conflicts */ detect_write_read_conflicts(); @@ -373,7 +361,6 @@ dprintf(("commit_transaction\n")); assert(STM_SEGMENT->nursery_end == NURSERY_END); - assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); STM_SEGMENT->jmpbuf_ptr = NULL; /* if a major collection is required, do it here */ @@ -393,17 +380,22 @@ STM_PSEGMENT->overflow_number = highest_overflow_number; } + /* send what is hopefully the correct signals */ + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + /* wake up one thread in wait_for_end_of_inevitable_transaction() */ + cond_signal(C_INEVITABLE); + } + /* done */ _finish_transaction(); + /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ - assert(STM_SEGMENT->nursery_end == NURSERY_END); - - mutex_unlock(); + s_mutex_unlock(); } void stm_abort_transaction(void) { - mutex_lock(); + s_mutex_lock(); abort_with_mutex(); } @@ -457,12 +449,12 @@ switch (STM_PSEGMENT->transaction_state) { case TS_REGULAR: - case TS_MUST_ABORT: break; case TS_INEVITABLE: - assert(!"abort: transaction_state == TS_INEVITABLE"); + stm_fatalerror("abort: transaction_state == TS_INEVITABLE"); default: - assert(!"abort: bad transaction_state"); + stm_fatalerror("abort: bad transaction_state == %d", + (int)STM_PSEGMENT->transaction_state); } assert(STM_PSEGMENT->running_pthread == pthread_self()); @@ -478,10 +470,12 @@ tl->thread_local_obj = STM_PSEGMENT->threadlocal_at_start_of_transaction; _finish_transaction(); + /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ - STM_SEGMENT->nursery_end = NURSERY_END; + /* Broadcast C_ABORTED to wake up contention.c */ + cond_broadcast(C_ABORTED); - mutex_unlock(); + s_mutex_unlock(); /* It seems to be a good idea, at least in some examples, to sleep one microsecond here before retrying. Otherwise, what was @@ -502,25 +496,16 @@ void _stm_become_inevitable(const char *msg) { - mutex_lock(); - switch (STM_PSEGMENT->transaction_state) { + s_mutex_lock(); + enter_safe_point_if_requested(); - case TS_INEVITABLE: - break; /* already good */ + if (STM_PSEGMENT->transaction_state == TS_REGULAR) { + dprintf(("become_inevitable: %s\n", msg)); - case TS_REGULAR: - /* become inevitable */ wait_for_end_of_inevitable_transaction(true); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; - break; + } - case TS_MUST_ABORT: - abort_with_mutex(); - - default: - assert(!"invalid transaction_state in become_inevitable"); - } - dprintf(("become_inevitable: %s\n", msg)); - mutex_unlock(); + s_mutex_unlock(); } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -104,10 +104,8 @@ /* The thread's safe-point state, one of the SP_xxx constants. The thread is in a "safe point" if it is not concurrently doing any - change that might cause race conditions in other threads. A - thread may enter but not *leave* the safe point it is in without - getting hold of the mutex. Broadly speaking, any value other - than SP_RUNNING means a safe point of some kind. */ + read or change in this data structure that might cause race + conditions in other threads. */ uint8_t safe_point; /* The transaction status, one of the TS_xxx constants. 
This is @@ -131,13 +129,16 @@ enum /* safe_point */ { SP_NO_TRANSACTION=0, SP_RUNNING, - SP_SAFE_POINT, + SP_WAIT_FOR_C_REQUEST_REMOVED, + SP_WAIT_FOR_C_AT_SAFE_POINT, +#ifdef STM_TESTS + SP_WAIT_FOR_OTHER_THREAD, +#endif }; enum /* transaction_state */ { TS_NONE=0, TS_REGULAR, TS_INEVITABLE, - TS_MUST_ABORT, }; static char *stm_object_pages; @@ -192,18 +193,4 @@ asm("/* workaround for llvm bug */"); } -static inline void abort_if_needed(void) { - switch (STM_PSEGMENT->transaction_state) { - case TS_REGULAR: - case TS_INEVITABLE: - break; - - case TS_MUST_ABORT: - stm_abort_transaction(); - - default: - assert(!"commit: bad transaction_state"); - } -} - static void synchronize_overflow_object_now(object_t *obj); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -116,24 +116,18 @@ if (!is_major_collection_requested()) return; - mutex_lock(); + s_mutex_lock(); - assert(STM_PSEGMENT->safe_point == SP_RUNNING); - STM_PSEGMENT->safe_point = SP_SAFE_POINT; + if (is_major_collection_requested()) { /* if still true */ - while (is_major_collection_requested()) { - /* wait until the other thread is at a safe-point */ - if (try_wait_for_other_safe_points()) { - /* ok */ + synchronize_all_threads(); + + if (is_major_collection_requested()) { /* if *still* true */ major_collection_now_at_safe_point(); - break; } } - assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT); - STM_PSEGMENT->safe_point = SP_RUNNING; - - mutex_unlock(); + s_mutex_unlock(); } static void major_collection_now_at_safe_point(void) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -283,7 +283,6 @@ assert(!_has_mutex()); stm_safe_point(); - abort_if_needed(); _do_minor_collection(commit); } @@ -384,7 +383,7 @@ continue; assert(pseg->transaction_state != TS_NONE); - assert(pseg->safe_point == SP_SAFE_POINT); + assert(pseg->safe_point != SP_RUNNING); set_gs_register(get_segment_base(i)); _do_minor_collection(/*commit=*/ false); diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,6 +1,10 @@ -/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGNAL */ -#define NSE_SIGNAL _STM_NSE_SIGNAL +/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ +#define NSE_SIGPAUSE 0 +#define NSE_SIGABORT 1 +#if NSE_SIGABORT > _STM_NSE_SIGNAL_MAX +# error "update _STM_NSE_SIGNAL_MAX" +#endif static uint32_t highest_overflow_number; @@ -9,3 +13,7 @@ static void check_nursery_at_transaction_start(void); static void throw_away_nursery(void); static void major_do_minor_collections(void); + +static inline bool must_abort(void) { + return STM_SEGMENT->nursery_end == NSE_SIGABORT; +} diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -11,23 +11,24 @@ - SP_RUNNING: a thread is running a transaction using this segment. - - SP_SAFE_POINT: the thread that owns this segment is currently + - SP_WAIT_FOR_xxx: the thread that owns this segment is currently suspended in a safe-point. (A safe-point means that it is not changing anything right now, and the current shadowstack is correct.) - Synchronization is done with a single mutex / condition variable. A - thread needs to have acquired the mutex in order to do things like - acquiring or releasing ownership of a segment or updating this - segment's state. 
No other thread can acquire the mutex concurrently, - and so there is no race: the (single) thread owning the mutex can - freely inspect or even change the state of other segments too. + Synchronization is done with a single mutex and a few condition + variables. A thread needs to have acquired the mutex in order to do + things like acquiring or releasing ownership of a segment or updating + this segment's state. No other thread can acquire the mutex + concurrently, and so there is no race: the (single) thread owning the + mutex can freely inspect or even change the state of other segments + too. */ static union { struct { pthread_mutex_t global_mutex; - pthread_cond_t global_cond; + pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ uint64_t global_time; @@ -41,8 +42,11 @@ if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0) stm_fatalerror("mutex initialization: %m\n"); - if (pthread_cond_init(&sync_ctl.global_cond, NULL) != 0) - stm_fatalerror("cond initialization: %m\n"); + long i; + for (i = 0; i < _C_TOTAL; i++) { + if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0) + stm_fatalerror("cond initialization: %m\n"); + } } static void teardown_sync(void) @@ -50,8 +54,11 @@ if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0) stm_fatalerror("mutex destroy: %m\n"); - if (pthread_cond_destroy(&sync_ctl.global_cond) != 0) - stm_fatalerror("cond destroy: %m\n"); + long i; + for (i = 0; i < _C_TOTAL; i++) { + if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) + stm_fatalerror("cond destroy: %m\n"); + } memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); } @@ -70,7 +77,7 @@ stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n"); } -static inline void mutex_lock_no_abort(void) +static inline void s_mutex_lock(void) { assert(!_has_mutex_here); if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) @@ -78,47 +85,59 @@ assert((_has_mutex_here = true, 1)); } -static inline void mutex_lock(void) +static inline void s_mutex_unlock(void) { - mutex_lock_no_abort(); - if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) - abort_with_mutex(); -} - -static inline void mutex_unlock(void) -{ - assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION || - STM_PSEGMENT->safe_point == SP_RUNNING); - assert(_has_mutex_here); if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) stm_fatalerror("pthread_mutex_unlock: %m\n"); assert((_has_mutex_here = false, 1)); } -static inline void cond_wait_no_abort(void) +static inline void cond_wait(enum cond_type_e ctype) { #ifdef STM_NO_COND_WAIT - stm_fatalerror("*** cond_wait called!\n"); + stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); #endif assert(_has_mutex_here); - if (UNLIKELY(pthread_cond_wait(&sync_ctl.global_cond, + if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], &sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_cond_wait: %m\n"); + stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); } -static inline void cond_wait(void) +static inline void cond_signal(enum cond_type_e ctype) { - cond_wait_no_abort(); - if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) - abort_with_mutex(); + if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0)) + stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); } -static inline void cond_broadcast(void) +static inline void cond_broadcast(enum cond_type_e ctype) { - if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.global_cond) != 0)) - stm_fatalerror("pthread_cond_broadcast: %m\n"); + if 
(UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0)) + stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype); +} + +/************************************************************/ + + +static void wait_for_end_of_inevitable_transaction(bool can_abort) +{ + long i; + restart: + for (i = 0; i < NB_SEGMENTS; i++) { + if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { + if (can_abort) { + /* for now, always abort if we can. We could also try + sometimes to wait for the other thread (needs to + take care about setting safe_point then) */ + abort_with_mutex(); + } + /* wait for stm_commit_transaction() to finish this + inevitable transaction */ + cond_wait(C_INEVITABLE); + goto restart; + } + } } static bool acquire_thread_segment(stm_thread_local_t *tl) @@ -151,10 +170,9 @@ goto got_num; } } - /* Wait and retry. It is guaranteed that any thread releasing its - segment will do so by acquiring the mutex and calling - cond_broadcast(). */ - cond_wait_no_abort(); + /* No segment available. Wait until release_thread_segment() + signals that one segment has been freed. */ + cond_wait(C_SEGMENT_FREE); /* Return false to the caller, which will call us again */ return false; @@ -177,28 +195,9 @@ assert(sync_ctl.in_use[tl->associated_segment_num] == 1); sync_ctl.in_use[tl->associated_segment_num] = 0; -} -static void wait_for_end_of_inevitable_transaction(bool can_abort) -{ - assert(_has_mutex()); - - long i; - restart: - for (i = 0; i < NB_SEGMENTS; i++) { - if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { - if (can_abort) { - /* XXX should we wait here? or abort? or a mix? - for now, always abort */ - abort_with_mutex(); - //cond_wait(); - } - else { - cond_wait_no_abort(); - } - goto restart; - } - } + /* wake up one of the threads waiting in acquire_thread_segment() */ + cond_signal(C_SEGMENT_FREE); } static bool _running_transaction(void) __attribute__((unused)); @@ -225,115 +224,130 @@ void _stm_start_safe_point(void) { assert(STM_PSEGMENT->safe_point == SP_RUNNING); - STM_PSEGMENT->safe_point = SP_SAFE_POINT; + STM_PSEGMENT->safe_point = SP_WAIT_FOR_OTHER_THREAD; } void _stm_stop_safe_point(void) { - assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT); + assert(STM_PSEGMENT->safe_point == SP_WAIT_FOR_OTHER_THREAD); STM_PSEGMENT->safe_point = SP_RUNNING; - if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) - stm_abort_transaction(); + stm_safe_point(); } #endif -static bool try_wait_for_other_safe_points(void) +/************************************************************/ + + +#ifndef NDEBUG +static bool _safe_points_requested = false; +#endif + +static void signal_everybody_to_pause_running(void) { - /* Must be called with the mutex. When all other threads are in a - safe point of at least the requested kind, returns. Otherwise, - asks them to enter a safe point, issues a cond_wait(), and wait. - - When this function returns, the other threads are all blocked at - safe points as requested. They may be either in their own - cond_wait(), or running at SP_NO_TRANSACTION, in which case they - should not do anything related to stm until the next time they - call mutex_lock(). - - The next time we unlock the mutex (with mutex_unlock() or - cond_wait()), they will proceed. - - This function requires that the calling thread is in a safe-point - right now, so there is no deadlock if one thread calls - try_wait_for_other_safe_points() while another is currently blocked - in the cond_wait() in this same function. 
- */ - - assert(_has_mutex()); - assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT); - - if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) - abort_with_mutex(); + assert(_safe_points_requested == false); + assert((_safe_points_requested = true, 1)); long i; - bool wait = false; for (i = 0; i < NB_SEGMENTS; i++) { - /* If the other thread is SP_NO_TRANSACTION, then it can be - ignored here: as long as we have the mutex, it will remain - SP_NO_TRANSACTION. If it is already at a suitable safe point, - it must be in a cond_wait(), so it will not resume as long - as we hold the mutex. Thus the only cases is if it is - SP_RUNNING, or at the wrong kind of safe point. - */ - struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); - if (other_pseg->safe_point == SP_RUNNING) { - /* we need to wait for this thread. Use NSE_SIGNAL to ask - it to enter a safe-point soon. */ - other_pseg->pub.nursery_end = NSE_SIGNAL; - wait = true; + if (get_segment(i)->nursery_end == NURSERY_END) + get_segment(i)->nursery_end = NSE_SIGPAUSE; + } +} + +static inline long count_other_threads_sp_running(void) +{ + /* Return the number of other threads in SP_RUNNING. + Asserts that SP_RUNNING threads still have the NSE_SIGxxx. */ + long i; + long result = 0; + int my_num = STM_SEGMENT->segment_num; + + for (i = 0; i < NB_SEGMENTS; i++) { + if (i != my_num && get_priv_segment(i)->safe_point == SP_RUNNING) { + assert(get_segment(i)->nursery_end <= _STM_NSE_SIGNAL_MAX); + result++; + } + } + return result; +} + +static void remove_requests_for_safe_point(void) +{ + assert(_safe_points_requested == true); + assert((_safe_points_requested = false, 1)); + + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + assert(get_segment(i)->nursery_end != NURSERY_END); + if (get_segment(i)->nursery_end == NSE_SIGPAUSE) + get_segment(i)->nursery_end = NURSERY_END; + } + cond_broadcast(C_REQUEST_REMOVED); +} + +static void enter_safe_point_if_requested(void) +{ + assert(_has_mutex()); + while (1) { + if (must_abort()) + abort_with_mutex(); + + if (STM_SEGMENT->nursery_end == NURSERY_END) + break; /* no safe point requested */ + + assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + + /* If we are requested to enter a safe-point, we cannot proceed now. + Wait until the safe-point request is removed for us. */ + + cond_signal(C_AT_SAFE_POINT); + STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; + cond_wait(C_REQUEST_REMOVED); + STM_PSEGMENT->safe_point = SP_RUNNING; + } +} + +static void synchronize_all_threads(void) +{ + enter_safe_point_if_requested(); + + /* Only one thread should reach this point concurrently. This is + why: if several threads call this function, the first one that + goes past this point will set the "request safe point" on all + other threads; then none of the other threads will go past the + enter_safe_point_if_requested() above. */ + signal_everybody_to_pause_running(); + + /* If some other threads are SP_RUNNING, we cannot proceed now. + Wait until all other threads are suspended. 
*/ + while (count_other_threads_sp_running() > 0) { + + STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_AT_SAFE_POINT; + cond_wait(C_AT_SAFE_POINT); + STM_PSEGMENT->safe_point = SP_RUNNING; + + if (must_abort()) { + remove_requests_for_safe_point(); /* => C_REQUEST_REMOVED */ + abort_with_mutex(); } } - if (wait) { - cond_wait(); - /* XXX think: I believe this can end in a busy-loop, with this thread - setting NSE_SIGNAL on the other thread; then the other thread - commits, sends C_SAFE_POINT, finish the transaction, start - the next one, and only then this thread resumes; then we're back - in the same situation as before with no progress here. - */ - return false; - } - - /* all threads are at a safe-point now. Broadcast C_RESUME, which - will allow them to resume --- but only when we release the mutex. */ - cond_broadcast(); - return true; + /* Remove the requests for safe-points now. In principle we should + remove it later, when the caller is done, but this is equivalent + as long as we hold the mutex. + */ + remove_requests_for_safe_point(); /* => C_REQUEST_REMOVED */ } void _stm_collectable_safe_point(void) { - /* If _stm_nursery_end was set to NSE_SIGNAL by another thread, + /* If 'nursery_end' was set to NSE_SIGxxx by another thread, we end up here as soon as we try to call stm_allocate() or do a call to stm_safe_point(). - - This works together with wait_for_other_safe_points() to - signal the C_SAFE_POINT condition. */ - mutex_lock(); - collectable_safe_point(); - mutex_unlock(); + s_mutex_lock(); + enter_safe_point_if_requested(); + s_mutex_unlock(); } - -static void collectable_safe_point(void) -{ - assert(_has_mutex()); - assert(STM_PSEGMENT->safe_point == SP_RUNNING); - - while (STM_SEGMENT->nursery_end == NSE_SIGNAL) { - dprintf(("collectable_safe_point...\n")); - STM_PSEGMENT->safe_point = SP_SAFE_POINT; - STM_SEGMENT->nursery_end = NURSERY_END; - - /* signal all the threads blocked in - wait_for_other_safe_points() */ - cond_broadcast(); - - cond_wait(); - - STM_PSEGMENT->safe_point = SP_RUNNING; - } - assert(STM_SEGMENT->nursery_end == NURSERY_END); - dprintf(("collectable_safe_point done\n")); -} diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -3,11 +3,20 @@ static void setup_sync(void); static void teardown_sync(void); -/* all synchronization is done via a mutex and a condition variable */ -static void mutex_lock(void); -static void mutex_unlock(void); -static void cond_wait(void); -static void cond_broadcast(void); +/* all synchronization is done via a mutex and a few condition variables */ +enum cond_type_e { + C_SEGMENT_FREE, + C_AT_SAFE_POINT, + C_REQUEST_REMOVED, + C_INEVITABLE, + C_ABORTED, + _C_TOTAL +}; +static void s_mutex_lock(void); +static void s_mutex_unlock(void); +static void cond_wait(enum cond_type_e); +static void cond_signal(enum cond_type_e); +static void cond_broadcast(enum cond_type_e); #ifndef NDEBUG static bool _has_mutex(void); #endif @@ -17,8 +26,6 @@ (must have the mutex acquired!) 
*/ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); + static void wait_for_end_of_inevitable_transaction(bool can_abort); - -/* see the source for an exact description */ -static bool try_wait_for_other_safe_points(void); -static void collectable_safe_point(void); +static void synchronize_all_threads(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -96,7 +96,7 @@ #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 -#define _STM_NSE_SIGNAL 0 +#define _STM_NSE_SIGNAL_MAX 1 #define _STM_FAST_ALLOC (66*1024) #define STM_FLAGS_PREBUILT _STM_GCFLAG_WRITE_BARRIER @@ -244,7 +244,7 @@ /* Forces a safe-point if needed. Normally not needed: this is automatic if you call stm_allocate(). */ static inline void stm_safe_point(void) { - if (STM_SEGMENT->nursery_end == _STM_NSE_SIGNAL) + if (STM_SEGMENT->nursery_end <= _STM_NSE_SIGNAL_MAX) _stm_collectable_safe_point(); } From noreply at buildbot.pypy.org Sat Mar 1 10:41:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 10:41:25 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix the last issue for the tests Message-ID: <20140301094125.9F6FD1D2481@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r906:bb47d1cca469 Date: 2014-03-01 10:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/bb47d1cca469/ Log: Fix the last issue for the tests diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -469,6 +469,9 @@ tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; tl->thread_local_obj = STM_PSEGMENT->threadlocal_at_start_of_transaction; + if (STM_SEGMENT->nursery_end == NSE_SIGABORT) + STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ + _finish_transaction(); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ From noreply at buildbot.pypy.org Sat Mar 1 11:06:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 11:06:44 +0100 (CET) Subject: [pypy-commit] stmgc default: Add a space for clarity Message-ID: <20140301100644.5E6C01C3427@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r907:2b57b68330a1 Date: 2014-03-01 11:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/2b57b68330a1/ Log: Add a space for clarity diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ b/c7/stm/fprintcolor.c @@ -13,7 +13,7 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm[%lx]", dprintfcolor(), + int size = (int)sprintf(buffer, "\033[%dm[%lx] ", dprintfcolor(), (long)pthread_self()); assert(size >= 0); From noreply at buildbot.pypy.org Sat Mar 1 11:59:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 11:59:04 +0100 (CET) Subject: [pypy-commit] pypy default: this works now... >6 years later Message-ID: <20140301105904.A632B1C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69571:850fe35a5647 Date: 2014-03-01 05:57 -0500 http://bitbucket.org/pypy/pypy/changeset/850fe35a5647/ Log: this works now... >6 years later diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -184,10 +184,6 @@ unrolling_arg_tps = unrolling_iterable(enumerate(args)) def wrapper(*args): - # XXX the next line is a workaround for the annotation bug - # shown in rpython.test.test_llann:test_pbctype. Remove it - # when the test is fixed... 
- assert isinstance(lltype.Signed, lltype.Number) real_args = () to_free = () for i, TARGET in unrolling_arg_tps: diff --git a/rpython/rtyper/test/test_llann.py b/rpython/rtyper/test/test_llann.py --- a/rpython/rtyper/test/test_llann.py +++ b/rpython/rtyper/test/test_llann.py @@ -368,7 +368,6 @@ assert s.unsigned == True def test_pbctype(self): - py.test.skip("annotation crash") TYPE = Void TYPE2 = Signed def g(lst): From noreply at buildbot.pypy.org Sat Mar 1 15:16:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 15:16:14 +0100 (CET) Subject: [pypy-commit] pypy default: Try to improve the timetable of the jit counters: replace the two tables Message-ID: <20140301141614.389F11D2481@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69572:680434495e1e Date: 2014-03-01 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/680434495e1e/ Log: Try to improve the timetable of the jit counters: replace the two tables of 4096 entries with a single 5-ways-associative table of 2048 entries. diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -500,8 +500,9 @@ ST_BUSY_FLAG = 0x01 # if set, busy tracing from the guard ST_TYPE_MASK = 0x06 # mask for the type (TY_xxx) ST_SHIFT = 3 # in "status >> ST_SHIFT" is stored: - # - if TY_NONE, the jitcounter index directly + # - if TY_NONE, the jitcounter hash directly # - otherwise, the guard_value failarg index + ST_SHIFT_MASK = -(1 << ST_SHIFT) TY_NONE = 0x00 TY_INT = 0x02 TY_REF = 0x04 @@ -514,8 +515,8 @@ # if metainterp_sd.warmrunnerdesc is not None: # for tests jitcounter = metainterp_sd.warmrunnerdesc.jitcounter - index = jitcounter.in_second_half(jitcounter.fetch_next_index()) - self.status = index << self.ST_SHIFT + hash = jitcounter.fetch_next_hash() + self.status = hash & self.ST_SHIFT_MASK def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.getopnum() == rop.GUARD_VALUE @@ -566,7 +567,7 @@ # common case: this is not a guard_value, and we are not # already busy tracing. The rest of self.status stores a # valid per-guard index in the jitcounter. - index = self.status >> self.ST_SHIFT + hash = self.status & self.ST_SHIFT_MASK # # do we have the BUSY flag? If so, we're tracing right now, e.g. in an # outer invocation of the same function, so don't trace again for now. @@ -597,12 +598,11 @@ intval = llmemory.cast_adr_to_int( llmemory.cast_int_to_adr(intval), "forced") - hash = (current_object_addr_as_int(self) * 777767777 + - intval * 1442968193) - index = jitcounter.in_second_half(jitcounter.get_index(hash)) + hash = r_uint(current_object_addr_as_int(self) * 777767777 + + intval * 1442968193) # increment = jitdriver_sd.warmstate.increment_trace_eagerness - return jitcounter.tick(index, increment) + return jitcounter.tick(hash, increment) def start_compiling(self): # start tracing and compiling from this guard. 
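As a rough illustration of the 5-ways-associative timetable described in the log message above (plain Python for readability, not the RPython code in the diffs; the sizes match the patch, but the eviction here simply reuses the last way, whereas the real tick() in counter.py below also reorders the ways by their counters before evicting):

    SIZE = 2048          # number of entries (a power of two)
    WAYS = 5             # counters stored per entry
    SHIFT = 32 - 11      # 2**11 == 2048: the index comes from the high bits

    class Entry(object):
        def __init__(self):
            self.times = [0.0] * WAYS       # one float counter per way
            self.subhashes = [0] * WAYS     # low 16 bits of the hash per way

    timetable = [Entry() for _i in range(SIZE)]

    def tick(h, increment):
        # Bump the counter for hash 'h'; True means the threshold was reached.
        h &= 0xFFFFFFFF
        entry = timetable[h >> SHIFT]       # high bits select the entry
        subhash = h & 0xFFFF                # low 16 bits identify the way
        if subhash in entry.subhashes:
            n = entry.subhashes.index(subhash)
        else:
            n = WAYS - 1                    # collision: reuse the last way
            entry.subhashes[n] = subhash
            entry.times[n] = 0.0
        counter = entry.times[n] + increment
        if counter < 1.0:
            entry.times[n] = counter
            return False
        entry.times[n] = 0.0                # reset and report "bound reached"
        return True
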
diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -7,28 +7,32 @@ assert r_uint32.BITS == 32 UINT32MAX = 2 ** 32 - 1 +# keep in sync with the C code in pypy__decay_jit_counters +ENTRY = lltype.Struct('timetable_entry', + ('times', lltype.FixedSizeArray(rffi.FLOAT, 5)), + ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5))) + class JitCounter: - DEFAULT_SIZE = 4096 + DEFAULT_SIZE = 2048 def __init__(self, size=DEFAULT_SIZE, translator=None): "NOT_RPYTHON" self.size = size - self.shift = 1 + self.shift = 16 while (UINT32MAX >> self.shift) != size - 1: self.shift += 1 - assert self.shift < 999, "size is not a power of two <= 2**31" + assert self.shift < 999, "size is not a power of two <= 2**16" # - # The table of timings. The first half is used for starting the - # compilation of new loops. The second half is used for turning - # failing guards into bridges. The two halves are split to avoid - # too much interference. - self.timetablesize = size * 2 - self.timetable = lltype.malloc(rffi.CArray(rffi.FLOAT), - self.timetablesize, + # The table of timings. This is a 5-ways associative cache. + # We index into it using a number between 0 and (size - 1), + # and we're getting a 32-bytes-long entry; then this entry + # contains 5 possible ways, each occupying 6 bytes: 4 bytes + # for a float, and the 2 lowest bytes from the original hash. + self.timetable = lltype.malloc(rffi.CArray(ENTRY), self.size, flavor='raw', zero=True, track_allocation=False) - self._nextindex = r_uint(0) + self._nexthash = r_uint(0) # # The table of JitCell entries, recording already-compiled loops self.celltable = [None] * size @@ -56,46 +60,92 @@ return 0.0 # no increment, never reach 1.0 return 1.0 / (threshold - 0.001) - def get_index(self, hash): - """Return the index (< self.size) from a hash value. This truncates + def _get_index(self, hash): + """Return the index (< self.size) from a hash. This truncates the hash to 32 bits, and then keep the *highest* remaining bits. - Be sure that hash is computed correctly.""" + Be sure that hash is computed correctly, by multiplying with + a large odd number or by fetch_next_hash().""" hash32 = r_uint(r_uint32(hash)) # mask off the bits higher than 32 index = hash32 >> self.shift # shift, resulting in a value < size return index # return the result as a r_uint - get_index._always_inline_ = True + _get_index._always_inline_ = True - def fetch_next_index(self): - result = self._nextindex - self._nextindex = (result + 1) & self.get_index(-1) + @staticmethod + def _get_subhash(hash): + return hash & 65535 + + def fetch_next_hash(self): + result = self._nexthash + # note: all three "1" bits in the following constant are needed + # to make test_counter.test_fetch_next_index pass. The first + # is to increment the "subhash" (lower 16 bits of the hash). + # The second is to increment the "index" portion of the hash. + # The third is so that after 65536 passes, the "index" is + # incremented by one more (by overflow), so that the next + # 65536 passes don't end up with the same subhashes. 
+ self._nexthash = result + r_uint(1 | (1 << self.shift) | + (1 << (self.shift - 16))) return result - def in_second_half(self, index): - assert index < r_uint(self.size) - return self.size + index + def _swap(self, p_entry, n): + if float(p_entry.times[n]) > float(p_entry.times[n + 1]): + return n + 1 + else: + x = p_entry.times[n] + p_entry.times[n] = p_entry.times[n + 1] + p_entry.times[n + 1] = x + x = p_entry.subhashes[n] + p_entry.subhashes[n] = p_entry.subhashes[n + 1] + p_entry.subhashes[n + 1] = x + return n + _swap._always_inline_ = True - def tick(self, index, increment): - counter = float(self.timetable[index]) + increment + def tick(self, hash, increment): + p_entry = self.timetable[self._get_index(hash)] + subhash = self._get_subhash(hash) + # + if p_entry.subhashes[0] == subhash: + n = 0 + elif p_entry.subhashes[1] == subhash: + n = self._swap(p_entry, 0) + elif p_entry.subhashes[2] == subhash: + n = self._swap(p_entry, 1) + elif p_entry.subhashes[3] == subhash: + n = self._swap(p_entry, 2) + elif p_entry.subhashes[4] == subhash: + n = self._swap(p_entry, 3) + else: + n = 4 + while n > 0 and float(p_entry.times[n - 1]) == 0.0: + n -= 1 + p_entry.subhashes[n] = rffi.cast(rffi.USHORT, subhash) + p_entry.times[n] = r_singlefloat(0.0) + # + counter = float(p_entry.times[n]) + increment if counter < 1.0: - self.timetable[index] = r_singlefloat(counter) + p_entry.times[n] = r_singlefloat(counter) return False else: # when the bound is reached, we immediately reset the value to 0.0 - self.reset(index) + self.reset(hash) return True - tick._always_inline_ = True - def reset(self, index): - self.timetable[index] = r_singlefloat(0.0) + def reset(self, hash): + p_entry = self.timetable[self._get_index(hash)] + subhash = self._get_subhash(hash) + for i in range(5): + if p_entry.subhashes[i] == subhash: + p_entry.times[i] = r_singlefloat(0.0) - def lookup_chain(self, index): - return self.celltable[index] + def lookup_chain(self, hash): + return self.celltable[self._get_index(hash)] - def cleanup_chain(self, index): - self.reset(index) - self.install_new_cell(index, None) + def cleanup_chain(self, hash): + self.reset(hash) + self.install_new_cell(hash, None) - def install_new_cell(self, index, newcell): + def install_new_cell(self, hash, newcell): + index = self._get_index(hash) cell = self.celltable[index] keep = newcell while cell is not None: @@ -125,22 +175,29 @@ # important in corner cases where we would suddenly compile more # than one loop because all counters reach the bound at the same # time, but where compiling all but the first one is pointless. 
- size = self.timetablesize - pypy__decay_jit_counters(self.timetable, self.decay_by_mult, size) + p = rffi.cast(rffi.CCHARP, self.timetable) + pypy__decay_jit_counters(p, self.decay_by_mult, self.size) # this function is written directly in C; gcc will optimize it using SSE eci = ExternalCompilationInfo(post_include_bits=[""" -static void pypy__decay_jit_counters(float table[], double f1, long size1) { +static void pypy__decay_jit_counters(char *data, double f1, long size) { + struct { float times[5]; unsigned short subhashes[5]; } *p = data; float f = (float)f1; - int i, size = (int)size1; - for (i=0; itimes[0] *= f; + p->times[1] *= f; + p->times[2] *= f; + p->times[3] *= f; + p->times[4] *= f; + ++p; + } } """]) pypy__decay_jit_counters = rffi.llexternal( - "pypy__decay_jit_counters", [rffi.FLOATP, lltype.Float, lltype.Signed], + "pypy__decay_jit_counters", [rffi.CCHARP, lltype.Float, lltype.Signed], lltype.Void, compilation_info=eci, _nowrapper=True, sandboxsafe=True) @@ -153,11 +210,12 @@ def __init__(self): from collections import defaultdict JitCounter.__init__(self, size=8) - zero = r_singlefloat(0.0) - self.timetable = defaultdict(lambda: zero) + def make_null_entry(): + return lltype.malloc(ENTRY, immortal=True, zero=True) + self.timetable = defaultdict(make_null_entry) self.celltable = defaultdict(lambda: None) - def get_index(self, hash): + def _get_index(self, hash): "NOT_RPYTHON" return hash @@ -165,10 +223,6 @@ "NOT_RPYTHON" pass - def in_second_half(self, index): - "NOT_RPYTHON" - return index + 12345 - def _clear_all(self): self.timetable.clear() self.celltable.clear() diff --git a/rpython/jit/metainterp/test/test_counter.py b/rpython/jit/metainterp/test/test_counter.py --- a/rpython/jit/metainterp/test/test_counter.py +++ b/rpython/jit/metainterp/test/test_counter.py @@ -5,30 +5,77 @@ jc = JitCounter(size=128) # 7 bits for i in range(10): hash = 400000001 * i - index = jc.get_index(hash) + index = jc._get_index(hash) assert index == (hash >> (32 - 7)) -def test_fetch_next_index(): - jc = JitCounter(size=4) - lst = [jc.fetch_next_index() for i in range(10)] - assert lst == [0, 1, 2, 3, 0, 1, 2, 3, 0, 1] +def test_get_subhash(): + assert JitCounter._get_subhash(0x518ebd) == 0x8ebd + +def test_fetch_next_hash(): + jc = JitCounter(size=2048) + # check the distribution of "fetch_next_hash() & ~7". 
+ blocks = [[jc.fetch_next_hash() & ~7 for i in range(65536)] + for j in range(2)] + for block in blocks: + assert 0 <= jc._get_index(block[0]) < 2048 + assert 0 <= jc._get_index(block[-1]) < 2048 + assert 0 <= jc._get_index(block[2531]) < 2048 + assert 0 <= jc._get_index(block[45981]) < 2048 + # should be correctly distributed: ideally 2047 or 2048 different + # values + assert len(set([jc._get_index(x) for x in block])) >= 2040 + # check that the subkeys are distinct for same-block entries + subkeys = {} + for block in blocks: + for x in block: + idx = jc._get_index(x) + subkeys.setdefault(idx, []).append(jc._get_subhash(x)) + collisions = 0 + for idx, sks in subkeys.items(): + collisions += len(sks) - len(set(sks)) + assert collisions < 5 + +def index2hash(jc, index, subhash=0): + assert 0 <= subhash < 65536 + return (index << jc.shift) | subhash def test_tick(): jc = JitCounter() incr = jc.compute_threshold(4) for i in range(5): - r = jc.tick(104, incr) + r = jc.tick(index2hash(jc, 104), incr) assert r is (i == 3) for i in range(5): - r = jc.tick(108, incr) - s = jc.tick(109, incr) + r = jc.tick(index2hash(jc, 108), incr) + s = jc.tick(index2hash(jc, 109), incr) assert r is (i == 3) assert s is (i == 3) - jc.reset(108) + jc.reset(index2hash(jc, 108)) for i in range(5): - r = jc.tick(108, incr) + r = jc.tick(index2hash(jc, 108), incr) assert r is (i == 3) +def test_collisions(): + jc = JitCounter(size=4) # 2 bits + incr = jc.compute_threshold(4) + for i in range(5): + for sk in range(100, 105): + r = jc.tick(index2hash(jc, 3, subhash=sk), incr) + assert r is (i == 3) + + jc = JitCounter() + incr = jc.compute_threshold(4) + misses = 0 + for i in range(5): + for sk in range(100, 106): + r = jc.tick(index2hash(jc, 3, subhash=sk), incr) + if r: + assert i == 3 + elif i == 3: + misses += 1 + assert misses < 5 + + def test_install_new_chain(): class Dead: next = None diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -7,7 +7,7 @@ from rpython.rlib.jit import PARAMETERS from rpython.rlib.nonconst import NonConstant from rpython.rlib.objectmodel import specialize, we_are_translated, r_dict -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.annlowlevel import (hlstr, cast_base_ptr_to_instance, cast_object_to_ptr) @@ -312,7 +312,7 @@ # assert 0, "should have raised" - def bound_reached(index, cell, *args): + def bound_reached(hash, cell, *args): if not confirm_enter_jit(*args): return jitcounter.decay_all_counters() @@ -322,7 +322,7 @@ greenargs = args[:num_green_args] if cell is None: cell = JitCell(*greenargs) - jitcounter.install_new_cell(index, cell) + jitcounter.install_new_cell(hash, cell) cell.flags |= JC_TRACING try: metainterp.compile_and_run_once(jitdriver_sd, *args) @@ -339,16 +339,16 @@ # These few lines inline some logic that is also on the # JitCell class, to avoid computing the hash several times. greenargs = args[:num_green_args] - index = JitCell.get_index(*greenargs) - cell = jitcounter.lookup_chain(index) + hash = JitCell.get_uhash(*greenargs) + cell = jitcounter.lookup_chain(hash) while cell is not None: if isinstance(cell, JitCell) and cell.comparekey(*greenargs): break # found cell = cell.next else: # not found. 
increment the counter - if jitcounter.tick(index, increment_threshold): - bound_reached(index, None, *args) + if jitcounter.tick(hash, increment_threshold): + bound_reached(hash, None, *args) return # Here, we have found 'cell'. @@ -359,15 +359,15 @@ # this function. don't trace a second time. return # attached by compile_tmp_callback(). count normally - if jitcounter.tick(index, increment_threshold): - bound_reached(index, cell, *args) + if jitcounter.tick(hash, increment_threshold): + bound_reached(hash, cell, *args) return # machine code was already compiled for these greenargs procedure_token = cell.get_procedure_token() if procedure_token is None: # it was an aborted compilation, or maybe a weakref that # has been freed - jitcounter.cleanup_chain(index) + jitcounter.cleanup_chain(hash) return if not confirm_enter_jit(*args): return @@ -422,7 +422,6 @@ green_args_name_spec = unrolling_iterable([('g%d' % i, TYPE) for i, TYPE in enumerate(jitdriver_sd._green_args_spec)]) unwrap_greenkey = self.make_unwrap_greenkey() - random_initial_value = hash(self) # class JitCell(BaseJitCell): def __init__(self, *greenargs): @@ -441,20 +440,20 @@ return True @staticmethod - def get_index(*greenargs): - x = random_initial_value + def get_uhash(*greenargs): + x = r_uint(-1888132534) i = 0 for _, TYPE in green_args_name_spec: item = greenargs[i] - y = hash_whatever(TYPE, item) - x = intmask((x ^ y) * 1405695061) # prime number, 2**30~31 + y = r_uint(hash_whatever(TYPE, item)) + x = (x ^ y) * r_uint(1405695061) # prime number, 2**30~31 i = i + 1 - return jitcounter.get_index(x) + return x @staticmethod def get_jitcell(*greenargs): - index = JitCell.get_index(*greenargs) - cell = jitcounter.lookup_chain(index) + hash = JitCell.get_uhash(*greenargs) + cell = jitcounter.lookup_chain(hash) while cell is not None: if (isinstance(cell, JitCell) and cell.comparekey(*greenargs)): @@ -470,15 +469,15 @@ @staticmethod def ensure_jit_cell_at_key(greenkey): greenargs = unwrap_greenkey(greenkey) - index = JitCell.get_index(*greenargs) - cell = jitcounter.lookup_chain(index) + hash = JitCell.get_uhash(*greenargs) + cell = jitcounter.lookup_chain(hash) while cell is not None: if (isinstance(cell, JitCell) and cell.comparekey(*greenargs)): return cell cell = cell.next newcell = JitCell(*greenargs) - jitcounter.install_new_cell(index, newcell) + jitcounter.install_new_cell(hash, newcell) return newcell # self.JitCell = JitCell From noreply at buildbot.pypy.org Sat Mar 1 15:16:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 15:16:15 +0100 (CET) Subject: [pypy-commit] pypy default: Split tick() into an inlinable fast-path and a regular slow-path. Message-ID: <20140301141615.5D4861D2481@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69573:b7025da78ba8 Date: 2014-03-01 15:15 +0100 http://bitbucket.org/pypy/pypy/changeset/b7025da78ba8/ Log: Split tick() into an inlinable fast-path and a regular slow-path. 
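To keep the common case cheap, the patch below leaves only the "way 0 matches" check in tick(), which can then stay _always_inline_, and moves the search-and-evict work into _tick_slowpath(). A stripped-down sketch of the same split, in plain Python with a made-up two-way entry rather than the real JitCounter:

    class TinyCounter(object):
        def __init__(self):
            self.subhashes = [0, 0]
            self.times = [0.0, 0.0]

        def tick(self, subhash, increment):      # small: good inlining candidate
            if self.subhashes[0] == subhash:
                n = 0                            # common case handled inline
            else:
                n = self._tick_slowpath(subhash) # rare case, kept out of line
            counter = self.times[n] + increment
            if counter < 1.0:
                self.times[n] = counter
                return False
            self.times[n] = 0.0                  # threshold reached: start over
            return True

        def _tick_slowpath(self, subhash):
            if self.subhashes[1] != subhash:     # miss: evict the other way
                self.subhashes[1] = subhash
                self.times[1] = 0.0
            return 1
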
diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -100,13 +100,8 @@ return n _swap._always_inline_ = True - def tick(self, hash, increment): - p_entry = self.timetable[self._get_index(hash)] - subhash = self._get_subhash(hash) - # - if p_entry.subhashes[0] == subhash: - n = 0 - elif p_entry.subhashes[1] == subhash: + def _tick_slowpath(self, p_entry, subhash): + if p_entry.subhashes[1] == subhash: n = self._swap(p_entry, 0) elif p_entry.subhashes[2] == subhash: n = self._swap(p_entry, 1) @@ -120,6 +115,16 @@ n -= 1 p_entry.subhashes[n] = rffi.cast(rffi.USHORT, subhash) p_entry.times[n] = r_singlefloat(0.0) + return n + + def tick(self, hash, increment): + p_entry = self.timetable[self._get_index(hash)] + subhash = self._get_subhash(hash) + # + if p_entry.subhashes[0] == subhash: + n = 0 + else: + n = self._tick_slowpath(p_entry, subhash) # counter = float(p_entry.times[n]) + increment if counter < 1.0: @@ -129,6 +134,7 @@ # when the bound is reached, we immediately reset the value to 0.0 self.reset(hash) return True + tick._always_inline_ = True def reset(self, hash): p_entry = self.timetable[self._get_index(hash)] diff --git a/rpython/jit/metainterp/test/test_counter.py b/rpython/jit/metainterp/test/test_counter.py --- a/rpython/jit/metainterp/test/test_counter.py +++ b/rpython/jit/metainterp/test/test_counter.py @@ -41,6 +41,7 @@ def test_tick(): jc = JitCounter() + jc._tick_slowpath = "not callable in this test!" incr = jc.compute_threshold(4) for i in range(5): r = jc.tick(index2hash(jc, 104), incr) From noreply at buildbot.pypy.org Sat Mar 1 17:03:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 17:03:31 +0100 (CET) Subject: [pypy-commit] stmgc default: The first test about major gc passes. Message-ID: <20140301160331.B4C091C1504@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r908:f11bf6145d1b Date: 2014-03-01 17:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/f11bf6145d1b/ Log: The first test about major gc passes. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -130,8 +130,94 @@ s_mutex_unlock(); } + +/************************************************************/ + + +static struct list_s *mark_objects_to_trace; + + +static inline struct object_s *mark_first_seg(object_t *obj) +{ + return (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); +} + +static inline bool mark_is_visited(object_t *obj) +{ + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + assert(lock_idx >= 0); + assert(lock_idx < sizeof(write_locks)); + return write_locks[lock_idx] != 0; +} + +static inline void mark_set_visited(object_t *obj) +{ + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + write_locks[lock_idx] = 0xff; +} + +static inline void mark_record_trace(object_t **pobj) +{ + /* takes a normal pointer to a thread-local pointer to an object */ + object_t *obj = *pobj; + + if (obj == NULL) + return; + if (mark_is_visited(obj)) + return; /* already visited this object */ + + mark_set_visited(obj); + LIST_APPEND(mark_objects_to_trace, obj); +} + +static void mark_collect_modified_objects(void) +{ + //... 
+} + +static void mark_collect_roots(void) +{ + stm_thread_local_t *tl = stm_all_thread_locals; + do { + object_t **current = tl->shadowstack; + object_t **base = tl->shadowstack_base; + while (current-- != base) { + assert(*current != (object_t *)-1); + mark_record_trace(current); + } + mark_record_trace(&tl->thread_local_obj); + + tl = tl->next; + } while (tl != stm_all_thread_locals); +} + +static void mark_visit_all_objects(void) +{ + while (!list_is_empty(mark_objects_to_trace)) { + object_t *obj = (object_t *)list_pop_item(mark_objects_to_trace); + + stmcb_trace(mark_first_seg(obj), &mark_record_trace); + + if (!is_fully_in_shared_pages(obj)) { + abort();//xxx; + } + } +} + +static inline bool largemalloc_keep_object_at(char *data) +{ + /* this is called by largemalloc_sweep() */ + return mark_is_visited((object_t *)(data - stm_object_pages)); +} + +static void sweep_large_objects(void) +{ + largemalloc_sweep(); +} + static void major_collection_now_at_safe_point(void) { + dprintf(("\n")); dprintf((" .----- major_collection_now_at_safe_point -----\n")); assert(_has_mutex()); @@ -141,7 +227,23 @@ dprintf((" | used before collection: %ld\n", (long)pages_ctl.total_allocated)); - fprintf(stderr, "hi, I should be doing a major GC here\n"); + /* marking */ + mark_objects_to_trace = list_create(); + mark_collect_modified_objects(); + mark_collect_roots(); + mark_visit_all_objects(); + list_free(mark_objects_to_trace); + mark_objects_to_trace = NULL; + + /* sweeping */ + mutex_pages_lock(); + sweep_large_objects(); + //sweep_uniform_pages(); + mutex_pages_unlock(); + + dprintf((" | used after collection: %ld\n", + (long)pages_ctl.total_allocated)); + dprintf((" `----------------------------------------------\n")); reset_major_collection_requested(); } diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -35,6 +35,7 @@ static void major_collection_if_requested(void); static void major_collection_now_at_safe_point(void); +static bool largemalloc_keep_object_at(char *data); /* for largemalloc.c */ static char *_allocate_small_slowpath(uint64_t size); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -60,6 +60,10 @@ assert(!(p->size & FLAG_SORTED)); return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size); } +static mchunk_t *next_chunk_a(mchunk_t *p) +{ + return chunk_at_offset(p, CHUNK_HEADER_SIZE + (p->size & ~FLAG_SORTED)); +} /* The free chunks are stored in "bins". Each bin is a doubly-linked @@ -420,3 +424,48 @@ } return 1; } + + +#ifdef STM_TESTS +bool (*_stm_largemalloc_keep)(char *data) = NULL; +#endif + +static inline bool _largemalloc_sweep_keep(mchunk_t *chunk) +{ +#ifdef STM_TESTS + if (_stm_largemalloc_keep != NULL) + return _stm_largemalloc_keep((char *)&chunk->d); +#endif + return largemalloc_keep_object_at((char *)&chunk->d); +} + +static void largemalloc_sweep(void) +{ + /* This may be slightly optimized by inlining _stm_large_free() and + making cases, e.g. we might know already if the previous block + was free or not. It's probably not really worth it. 
*/ + mchunk_t *mnext, *chunk = first_chunk; + + if (chunk->prev_size == THIS_CHUNK_FREE) + chunk = next_chunk_a(chunk); /* go to the first non-free chunk */ + + while (chunk != last_chunk) { + + /* here, the chunk we're pointing to is not free */ + assert(chunk->prev_size != THIS_CHUNK_FREE); + + /* first figure out the next non-free chunk */ + mnext = next_chunk_u(chunk); + if (mnext->prev_size == THIS_CHUNK_FREE) + mnext = next_chunk_a(mnext); + + /* use the callback to know if 'chunk' contains an object that + survives or dies */ + if (!_largemalloc_sweep_keep(chunk)) { + size_t size = chunk->size; + _stm_large_free((char *)&chunk->d); /* dies */ + increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); + } + chunk = mnext; + } +} diff --git a/c7/stm/largemalloc.h b/c7/stm/largemalloc.h --- a/c7/stm/largemalloc.h +++ b/c7/stm/largemalloc.h @@ -13,5 +13,7 @@ void _stm_large_dump(void); +static void largemalloc_sweep(void); + #define LARGE_MALLOC_OVERHEAD (2 * sizeof(size_t)) /* estimate */ diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -197,7 +197,6 @@ mutex_pages_unlock(); } -#if 0 static bool is_fully_in_shared_pages(object_t *obj) { uintptr_t first_page = ((uintptr_t)obj) / 4096UL; @@ -217,4 +216,3 @@ return true; } -#endif diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -41,4 +41,4 @@ _pages_privatize(pagenum, count, full); } -//static bool is_fully_in_shared_pages(object_t *obj); -- not needed? +static bool is_fully_in_shared_pages(object_t *obj); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -49,7 +49,7 @@ PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(i + 1 < 256); + assert(i + 1 < 0xff); /* 0xff (255) is used by major collections */ pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -100,5 +100,4 @@ assert 5000 <= lib._stm_total_allocated() <= 8192 stm_major_collect() - py.test.skip("in-progress") assert lib._stm_total_allocated() == 0 From noreply at buildbot.pypy.org Sat Mar 1 18:40:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 18:40:36 +0100 (CET) Subject: [pypy-commit] pypy default: Crash early when trying to assign a resizable list into a 'lst[*]' Message-ID: <20140301174036.E6CC21C1504@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69574:492582e28528 Date: 2014-03-01 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/492582e28528/ Log: Crash early when trying to assign a resizable list into a 'lst[*]' attribute. 
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3722,6 +3722,18 @@ a = self.RPythonAnnotator() py.test.raises(ListChangeUnallowed, a.build_types, f, [int]) + def test_immutable_list_is_assigned_a_resizable_list(self): + class A: + _immutable_fields_ = 'lst[*]' + def f(n): + a = A() + foo = [] + foo.append(n) + a.lst = foo + + a = self.RPythonAnnotator() + py.test.raises(ListChangeUnallowed, a.build_types, f, [int]) + def test_can_merge_immutable_list_with_regular_list(self): class A: _immutable_fields_ = 'lst[*]' diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -676,6 +676,12 @@ # create or update the attribute in clsdef clsdef.generalize_attr(attr, s_value) + if isinstance(s_value, SomeList): + clsdef.classdesc.maybe_return_immutable_list( + attr, s_value) + else: + raise AnnotatorError("setattr(instance, variable_attr, value)") + def bool_behavior(self, s): if not self.can_be_None: s.const = True From noreply at buildbot.pypy.org Sat Mar 1 18:57:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 18:57:43 +0100 (CET) Subject: [pypy-commit] stmgc default: Next test Message-ID: <20140301175743.0B3261C3427@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r909:b8042cd71968 Date: 2014-03-01 17:14 +0100 http://bitbucket.org/pypy/stmgc/changeset/b8042cd71968/ Log: Next test diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -463,8 +463,8 @@ survives or dies */ if (!_largemalloc_sweep_keep(chunk)) { size_t size = chunk->size; + increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free((char *)&chunk->d); /* dies */ - increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); } chunk = mnext; } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -77,7 +77,7 @@ GC_N_SMALL_REQUESTS = 36 # from gcpage.c SHARED_PAGE = 1 # from pages.h PRIVATE_PAGE = 3 # from pages.h - +LARGE_MALLOC_OVERHEAD = 16 # from largemalloc.h lib = ffi.verify(''' #include diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -1,6 +1,10 @@ from support import * import py + +LMO = LARGE_MALLOC_OVERHEAD + + class TestGCPage(BaseTest): def test_large_obj_alloc(self): @@ -93,11 +97,29 @@ new = stm_allocate(5000) self.push_root(new) stm_minor_collect() - assert 5000 <= lib._stm_total_allocated() <= 8192 + assert lib._stm_total_allocated() == 5000 + LMO self.pop_root() stm_minor_collect() - assert 5000 <= lib._stm_total_allocated() <= 8192 + assert lib._stm_total_allocated() == 5000 + LMO stm_major_collect() assert lib._stm_total_allocated() == 0 + + def test_mark_recursive(self): + def make_chain(sz): + prev = ffi.cast("object_t *", ffi.NULL) + for i in range(10): + self.push_root(prev) + new = stm_allocate_refs(sz/8-1) + prev = self.pop_root() + stm_set_ref(new, 42, prev) + prev = new + return new + + self.start_transaction() + self.push_root(make_chain(5000)) + self.push_root(make_chain(4312)) + stm_minor_collect() + assert lib._stm_total_allocated() == (10 * (5000 + LMO) + + 10 * (4312 + LMO)) From noreply at buildbot.pypy.org Sat Mar 1 18:57:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 18:57:44 +0100 (CET) Subject: 
[pypy-commit] stmgc default: Test that largemalloc_sweep() works as expected; start to work on recording modified objects Message-ID: <20140301175744.289FF1C3427@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r910:6f5c7ae1e5eb Date: 2014-03-01 18:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/6f5c7ae1e5eb/ Log: Test that largemalloc_sweep() works as expected; start to work on recording modified objects diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -136,24 +136,84 @@ static struct list_s *mark_objects_to_trace; +#define WL_VISITED 42 -static inline struct object_s *mark_first_seg(object_t *obj) + +static inline uintptr_t mark_loc(object_t *obj) { - return (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + assert(lock_idx >= 0); + assert(lock_idx < sizeof(write_locks)); + return lock_idx; } static inline bool mark_is_visited(object_t *obj) { - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; - assert(lock_idx >= 0); - assert(lock_idx < sizeof(write_locks)); + uintptr_t lock_idx = mark_loc(obj); + assert(write_locks[lock_idx] == 0 || write_locks[lock_idx] == WL_VISITED); return write_locks[lock_idx] != 0; } static inline void mark_set_visited(object_t *obj) { - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; - write_locks[lock_idx] = 0xff; + uintptr_t lock_idx = mark_loc(obj); + write_locks[lock_idx] = WL_VISITED; +} + +static void mark_record_modified_objects(void) +{ + /* The modified objects are the ones that may exist in two different + versions: one in the segment that modified it, and another in + all other segments. */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + char *base1 = get_segment_base(i); /* two different segments */ + char *base2 = get_segment_base(!i); + + LIST_FOREACH_R( + pseg->modified_old_objects, + object_t * /*item*/, + ({ + assert(item != NULL); + + uintptr_t lock_idx = mark_loc(item); + assert(write_locks[lock_idx] == pseg->write_lock_num); + + write_locks[lock_idx] = WL_VISITED; + LIST_APPEND(mark_objects_to_trace, REAL_ADDRESS(base1, item)); + LIST_APPEND(mark_objects_to_trace, REAL_ADDRESS(base2, item)); + })); + } +} + +static void reset_write_locks(void) +{ + /* the write_locks array, containing the visit marker during + major collection, is cleared now, with two memsets (to avoid + clearing the never-used range in the middle corresponding to + uninitialized pages) */ + object_t *loc1 = (object_t *)(uninitialized_page_start - stm_object_pages); + object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); + uintptr_t lock1_idx = mark_loc(loc1); + uintptr_t lock2_idx = mark_loc(loc2 - 1) + 1; + + memset(write_locks, 0, lock1_idx); + memset(write_locks + lock2_idx, 0, sizeof(write_locks) - lock2_idx); + + /* restore the write locks on the modified objects */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + + LIST_FOREACH_R( + pseg->modified_old_objects, + object_t * /*item*/, + ({ + uintptr_t lock_idx = mark_loc(item); + write_locks[lock_idx] = pseg->write_lock_num; + })); + } } static inline void mark_record_trace(object_t **pobj) @@ -167,12 +227,7 @@ return; /* already visited this object */ mark_set_visited(obj); - LIST_APPEND(mark_objects_to_trace, obj); -} - -static void mark_collect_modified_objects(void) -{ - //... 
+ LIST_APPEND(mark_objects_to_trace, REAL_ADDRESS(stm_object_pages, obj)); } static void mark_collect_roots(void) @@ -194,25 +249,21 @@ static void mark_visit_all_objects(void) { while (!list_is_empty(mark_objects_to_trace)) { - object_t *obj = (object_t *)list_pop_item(mark_objects_to_trace); - - stmcb_trace(mark_first_seg(obj), &mark_record_trace); - - if (!is_fully_in_shared_pages(obj)) { - abort();//xxx; - } + struct object_s *obj = + (struct object_s *)list_pop_item(mark_objects_to_trace); + stmcb_trace(obj, &mark_record_trace); } } static inline bool largemalloc_keep_object_at(char *data) { - /* this is called by largemalloc_sweep() */ + /* this is called by _stm_largemalloc_sweep() */ return mark_is_visited((object_t *)(data - stm_object_pages)); } static void sweep_large_objects(void) { - largemalloc_sweep(); + _stm_largemalloc_sweep(); } static void major_collection_now_at_safe_point(void) @@ -229,7 +280,7 @@ /* marking */ mark_objects_to_trace = list_create(); - mark_collect_modified_objects(); + mark_record_modified_objects(); mark_collect_roots(); mark_visit_all_objects(); list_free(mark_objects_to_trace); @@ -241,6 +292,8 @@ //sweep_uniform_pages(); mutex_pages_unlock(); + reset_write_locks(); + dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); dprintf((" `----------------------------------------------\n")); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -348,6 +348,10 @@ return (char *)first_chunk; } +#ifdef STM_TESTS +bool (*_stm_largemalloc_keep)(char *data); /* a hook for tests */ +#endif + void _stm_largemalloc_init_arena(char *data_start, size_t data_size) { int i; @@ -367,6 +371,10 @@ assert(last_chunk == next_chunk_u(first_chunk)); insert_unsorted(first_chunk); + +#ifdef STM_TESTS + _stm_largemalloc_keep = NULL; +#endif } int _stm_largemalloc_resize_arena(size_t new_size) @@ -426,10 +434,6 @@ } -#ifdef STM_TESTS -bool (*_stm_largemalloc_keep)(char *data) = NULL; -#endif - static inline bool _largemalloc_sweep_keep(mchunk_t *chunk) { #ifdef STM_TESTS @@ -439,7 +443,7 @@ return largemalloc_keep_object_at((char *)&chunk->d); } -static void largemalloc_sweep(void) +void _stm_largemalloc_sweep(void) { /* This may be slightly optimized by inlining _stm_large_free() and making cases, e.g. we might know already if the previous block diff --git a/c7/stm/largemalloc.h b/c7/stm/largemalloc.h --- a/c7/stm/largemalloc.h +++ b/c7/stm/largemalloc.h @@ -10,10 +10,9 @@ major collections, which have their own synchronization mecanisms. 
*/ char *_stm_large_malloc(size_t request_size); void _stm_large_free(char *data); +void _stm_largemalloc_sweep(void); void _stm_large_dump(void); -static void largemalloc_sweep(void); - #define LARGE_MALLOC_OVERHEAD (2 * sizeof(size_t)) /* estimate */ diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -47,7 +47,6 @@ static uint64_t increment_total_allocated(ssize_t add_or_remove) { - assert(_has_mutex_pages()); pages_ctl.total_allocated += add_or_remove; if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) @@ -197,6 +196,7 @@ mutex_pages_unlock(); } +#if 0 static bool is_fully_in_shared_pages(object_t *obj) { uintptr_t first_page = ((uintptr_t)obj) / 4096UL; @@ -216,3 +216,4 @@ return true; } +#endif diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -41,4 +41,4 @@ _pages_privatize(pagenum, count, full); } -static bool is_fully_in_shared_pages(object_t *obj); +/* static bool is_fully_in_shared_pages(object_t *obj); */ diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -49,7 +49,7 @@ PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(i + 1 < 0xff); /* 0xff (255) is used by major collections */ + assert(i + 1 <= 255); pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -85,6 +85,8 @@ char *_stm_large_malloc(size_t request_size); void _stm_large_free(char *data); void _stm_large_dump(void); +bool (*_stm_largemalloc_keep)(char *data); +void _stm_largemalloc_sweep(void); void _stm_start_safe_point(void); void _stm_stop_safe_point(void); void _stm_set_nursery_free_count(uint64_t free_count); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -61,6 +61,8 @@ void _stm_large_free(char *data); void _stm_large_dump(void); void *memset(void *s, int c, size_t n); +bool (*_stm_largemalloc_keep)(char *data); +void _stm_largemalloc_sweep(void); ssize_t stmcb_size_rounded_up(struct object_s *obj); diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -115,7 +115,7 @@ prev = self.pop_root() stm_set_ref(new, 42, prev) prev = new - return new + return prev self.start_transaction() self.push_root(make_chain(5000)) @@ -123,3 +123,15 @@ stm_minor_collect() assert lib._stm_total_allocated() == (10 * (5000 + LMO) + 10 * (4312 + LMO)) + stm_major_collect() + assert lib._stm_total_allocated() == (10 * (5000 + LMO) + + 10 * (4312 + LMO)) + stm_major_collect() + assert lib._stm_total_allocated() == (10 * (5000 + LMO) + + 10 * (4312 + LMO)) + self.pop_root() + stm_major_collect() + assert lib._stm_total_allocated() == 10 * (5000 + LMO) + + def test_trace_all_versions(self): + pass #xxx in-progress diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -118,3 +118,44 @@ ra(d)[sz - 1] = content2 p.append((d, sz, content1, content2)) lib._stm_large_dump() + + def test_random_largemalloc_sweep(self): + @ffi.callback("bool(char *)") + def keep(data): + try: + if data in from_before: + return False + index = all.index(data) + seen_for.add(index) + return index in keep_me + except Exception, e: + errors.append(e) + raise + lib._stm_largemalloc_keep = keep + errors = [] + from_before = set() + + r = random.Random(1000) + for j in range(50): + sizes = 
[random.choice(range(104, 500, 8)) for i in range(20)] + all = [lib._stm_large_malloc(size) for size in sizes] + print all + + keep_me = set() + for i in range(len(all)): + if r.random() < 0.5: + print 'free:', all[i] + lib._stm_large_free(all[i]) + all[i] = None + elif r.random() < 0.5: + keep_me.add(i) + + seen_for = set() + lib._stm_largemalloc_sweep() + assert seen_for == set([i for i in range(len(all)) + if all[i] is not None]) + lib._stm_large_dump() + from_before = [all[i] for i in keep_me] + + if errors: + raise errors[0] From noreply at buildbot.pypy.org Sat Mar 1 19:01:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 19:01:38 +0100 (CET) Subject: [pypy-commit] stmgc default: A passing test Message-ID: <20140301180138.6820D1D265E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r911:a4aac2f3be09 Date: 2014-03-01 19:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/a4aac2f3be09/ Log: A passing test diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -134,4 +134,27 @@ assert lib._stm_total_allocated() == 10 * (5000 + LMO) def test_trace_all_versions(self): - pass #xxx in-progress + self.start_transaction() + x = stm_allocate(5000) + stm_set_char(x, 'A') + self.push_root(x) + self.commit_transaction() + assert lib._stm_total_allocated() == 5000 + LMO + + self.start_transaction() + x = self.pop_root() + self.push_root(x) + assert lib._stm_total_allocated() == 5000 + LMO + stm_set_char(x, 'B') + assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages + stm_major_collect() + + assert stm_get_char(x) == 'B' + + self.switch(1) + self.start_transaction() + assert stm_get_char(x) == 'A' + + self.switch(0) + assert stm_get_char(x) == 'B' + assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages From noreply at buildbot.pypy.org Sat Mar 1 20:04:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 20:04:15 +0100 (CET) Subject: [pypy-commit] stmgc default: Small refactoring Message-ID: <20140301190415.138C11C3427@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r912:f12f3ade6f01 Date: 2014-03-01 19:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/f12f3ade6f01/ Log: Small refactoring diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -147,17 +147,30 @@ return lock_idx; } -static inline bool mark_is_visited(object_t *obj) +static inline bool mark_visited_test_and_set(object_t *obj) { uintptr_t lock_idx = mark_loc(obj); assert(write_locks[lock_idx] == 0 || write_locks[lock_idx] == WL_VISITED); - return write_locks[lock_idx] != 0; + if (write_locks[lock_idx] != 0) { + return true; + } + else { + write_locks[lock_idx] = WL_VISITED; + return false; + } } -static inline void mark_set_visited(object_t *obj) +static inline bool mark_visited_test_and_clear(object_t *obj) { uintptr_t lock_idx = mark_loc(obj); - write_locks[lock_idx] = WL_VISITED; + assert(write_locks[lock_idx] == 0 || write_locks[lock_idx] == WL_VISITED); + if (write_locks[lock_idx] != 0) { + write_locks[lock_idx] = 0; + return true; + } + else { + return false; + } } static void mark_record_modified_objects(void) @@ -190,15 +203,19 @@ static void reset_write_locks(void) { /* the write_locks array, containing the visit marker during - major collection, is cleared now, with two memsets (to avoid - clearing the never-used range in the middle corresponding to - uninitialized pages) */ - object_t *loc1 = (object_t 
*)(uninitialized_page_start - stm_object_pages); + major collection, is cleared in sweep_large_objects() for + large objects, but is not cleared for small objects. + Clear it now. */ object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); - uintptr_t lock1_idx = mark_loc(loc1); uintptr_t lock2_idx = mark_loc(loc2 - 1) + 1; - memset(write_locks, 0, lock1_idx); +#ifdef STM_TESTS + long _i; + for (_i=0; _i Author: Armin Rigo Branch: Changeset: r913:e7ae3f43dde8 Date: 2014-03-01 20:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/e7ae3f43dde8/ Log: in-progress. still a bug left diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -236,6 +236,7 @@ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + assert(obj_size >= 16); uintptr_t start = (uintptr_t)obj; uintptr_t end = start + obj_size; uintptr_t first_page = start / 4096UL; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -95,6 +95,8 @@ return addr; } +static struct list_s *testing_prebuilt_objs = NULL; + object_t *_stm_allocate_old(ssize_t size_rounded_up) { /* only for tests */ @@ -103,6 +105,11 @@ object_t *o = (object_t *)(p - stm_object_pages); o->stm_flags = STM_FLAGS_PREBUILT; + + if (testing_prebuilt_objs == NULL) + testing_prebuilt_objs = list_create(); + LIST_APPEND(testing_prebuilt_objs, o); + return o; } @@ -147,6 +154,13 @@ return lock_idx; } +static inline bool mark_visited_test(object_t *obj) +{ + uintptr_t lock_idx = mark_loc(obj); + assert(write_locks[lock_idx] == 0 || write_locks[lock_idx] == WL_VISITED); + return write_locks[lock_idx] != 0; +} + static inline bool mark_visited_test_and_set(object_t *obj) { uintptr_t lock_idx = mark_loc(obj); @@ -228,6 +242,7 @@ object_t * /*item*/, ({ uintptr_t lock_idx = mark_loc(item); + assert(write_locks[lock_idx] == 0); write_locks[lock_idx] = pseg->write_lock_num; })); } @@ -261,6 +276,9 @@ tl = tl->next; } while (tl != stm_all_thread_locals); + + LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, + mark_record_trace(&item)); } static void mark_visit_all_objects(void) @@ -283,6 +301,43 @@ _stm_largemalloc_sweep(); } +static void clean_up_segment_lists(void) +{ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst; + + /* 'objects_pointing_to_nursery' should be empty, but isn't + necessarily because it also lists objects that have been + written to but don't actually point to the nursery. Clear + it up and set GCFLAG_WRITE_BARRIER again on the objects. 
*/ + lst = pseg->objects_pointing_to_nursery; + if (lst != NULL) { + LIST_FOREACH_R(lst, uintptr_t /*item*/, + ({ + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + assert(!(realobj->stm_flags & GCFLAG_WRITE_BARRIER)); + realobj->stm_flags |= GCFLAG_WRITE_BARRIER; + })); + list_clear(lst); + } + + /* Remove from 'large_overflow_objects' all objects that die */ + lst = pseg->large_overflow_objects; + if (lst != NULL) { + uintptr_t n = list_count(lst); + while (n > 0) { + object_t *obj = (object_t *)list_item(lst, --n); + if (!mark_visited_test(obj)) { + list_set_item(lst, n, list_pop_item(lst)); + } + } + } + } +} + static void major_collection_now_at_safe_point(void) { dprintf(("\n")); @@ -303,6 +358,9 @@ list_free(mark_objects_to_trace); mark_objects_to_trace = NULL; + /* cleanup */ + clean_up_segment_lists(); + /* sweeping */ mutex_pages_lock(); sweep_large_objects(); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -251,6 +251,12 @@ assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); +#ifndef NDEBUG + assert(chunk->size >= sizeof(dlist_t)); + assert(chunk->size <= (((char *)last_chunk) - (char *)data)); + memset(data, 0xDD, chunk->size); +#endif + /* try to merge with the following chunk in memory */ size_t msize = chunk->size + CHUNK_HEADER_SIZE; mchunk_t *mscan = chunk_at_offset(chunk, msize); diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -58,6 +58,12 @@ return lst->items[index]; } +static inline void list_set_item(struct list_s *lst, uintptr_t index, + uintptr_t newitem) +{ + lst->items[index] = newitem; +} + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -217,7 +217,7 @@ return myobj->type_id - 42; } else { - int nrefs = myobj->type_id - 421420; + uint64_t nrefs = myobj->type_id - 421420; assert(nrefs < 10000); /* artificial limit, to check for garbage */ if (nrefs == 0) /* weakrefs */ nrefs = 1; diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -136,11 +136,15 @@ from_before = set() r = random.Random(1000) - for j in range(50): + for j in range(500): sizes = [random.choice(range(104, 500, 8)) for i in range(20)] all = [lib._stm_large_malloc(size) for size in sizes] print all + for i in range(len(all)): + all[i][50] = chr(65 + i) + all_orig = all[:] + keep_me = set() for i in range(len(all)): if r.random() < 0.5: @@ -152,10 +156,16 @@ seen_for = set() lib._stm_largemalloc_sweep() + if errors: + raise errors[0] assert seen_for == set([i for i in range(len(all)) if all[i] is not None]) lib._stm_large_dump() + from_before = [all[i] for i in keep_me] - if errors: - raise errors[0] + for i in range(len(all)): + if i in keep_me: + assert all[i][50] == chr(65 + i) + else: + assert all_orig[i][50] == '\xDD' diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -384,6 +384,12 @@ thread_state.pop_roots(ex) thread_state.reload_roots(ex) +def op_major_collect(ex, global_state, thread_state): + thread_state.push_roots(ex) + ex.do('stm_major_collect()') + thread_state.pop_roots(ex) + thread_state.reload_roots(ex) + def op_forget_root(ex, global_state, thread_state): r = thread_state.forget_random_root() @@ 
-566,6 +572,7 @@ op_assert_size, op_assert_modified, op_minor_collect, + op_major_collect, ] for _ in range(200): # make sure we are in a transaction: From noreply at buildbot.pypy.org Sat Mar 1 20:16:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 20:16:17 +0100 (CET) Subject: [pypy-commit] cffi default: Two "decrefs" that are very theoretically missing (but it's not like Message-ID: <20140301191617.4427E1C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1469:da03a09a3bc4 Date: 2014-03-01 20:16 +0100 http://bitbucket.org/cffi/cffi/changeset/da03a09a3bc4/ Log: Two "decrefs" that are very theoretically missing (but it's not like you can actually free the object or win a lot) diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -897,11 +897,13 @@ if (c_api_object == NULL) return; if (!PyCapsule_CheckExact(c_api_object)) { + Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); return; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + Py_DECREF(c_api_object); } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) From noreply at buildbot.pypy.org Sat Mar 1 20:53:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 20:53:32 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix, but a failing test. Message-ID: <20140301195332.0B6DC1C1504@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r914:5d777b5b2815 Date: 2014-03-01 20:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/5d777b5b2815/ Log: Fix, but a failing test. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -277,8 +277,10 @@ tl = tl->next; } while (tl != stm_all_thread_locals); - LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, - mark_record_trace(&item)); + if (testing_prebuilt_objs != NULL) { + LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, + mark_record_trace(&item)); + } } static void mark_visit_all_objects(void) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -183,7 +183,7 @@ void _set_ptr(object_t *obj, int n, object_t *v) { - int nrefs = ((myobj_t*)obj)->type_id - 421420; + long nrefs = (long)((myobj_t*)obj)->type_id - 421420; assert(n < nrefs); stm_char *field_addr = ((stm_char*)obj); @@ -195,7 +195,7 @@ object_t * _get_ptr(object_t *obj, int n) { - int nrefs = ((myobj_t*)obj)->type_id - 421420; + long nrefs = (long)((myobj_t*)obj)->type_id - 421420; assert(n < nrefs); stm_char *field_addr = ((stm_char*)obj); @@ -300,10 +300,12 @@ def stm_set_char(obj, c, offset=HDR): stm_write(obj) + assert HDR <= offset < stm_get_obj_size(obj) stm_get_real_address(obj)[offset] = c def stm_get_char(obj, offset=HDR): stm_read(obj) + assert HDR <= offset < stm_get_obj_size(obj) return stm_get_real_address(obj)[offset] def stm_get_real_address(obj): diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -137,6 +137,7 @@ self.start_transaction() x = stm_allocate(5000) stm_set_char(x, 'A') + stm_set_char(x, 'a', 4999) self.push_root(x) self.commit_transaction() assert lib._stm_total_allocated() == 5000 + LMO @@ -146,15 +147,37 @@ self.push_root(x) assert lib._stm_total_allocated() == 5000 + LMO stm_set_char(x, 'B') + stm_set_char(x, 'b', 4999) assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages stm_major_collect() - 
assert stm_get_char(x) == 'B' + assert stm_get_char(x) == 'B' + assert stm_get_char(x, 4999) == 'b' self.switch(1) self.start_transaction() - assert stm_get_char(x) == 'A' + assert stm_get_char(x) == 'A' + assert stm_get_char(x, 4999) == 'a' self.switch(0) - assert stm_get_char(x) == 'B' + assert stm_get_char(x) == 'B' + assert stm_get_char(x, 4999) == 'b' assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages + + def test_trace_correct_version_of_overflow_objects_1(self, size=32): + self.start_transaction() + # + self.switch(1) + self.start_transaction() + x = stm_allocate(size) + stm_set_char(x, 'E', size - 1) + self.push_root(x) + # + self.switch(0) + stm_major_collect() + # + self.switch(1) + assert stm_get_char(x, size - 1) == 'E' + + def test_trace_correct_version_of_overflow_objects_2(self): + self.test_trace_correct_version_of_overflow_objects_1(size=5000) From noreply at buildbot.pypy.org Sat Mar 1 21:30:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 21:30:57 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.5: merge default Message-ID: <20140301203057.ED93F1C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.5 Changeset: r69575:2480f8a25459 Date: 2014-03-01 15:22 -0500 http://bitbucket.org/pypy/pypy/changeset/2480f8a25459/ Log: merge default diff too long, truncating to 2000 out of 75605 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -33,7 +33,7 @@ $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,60 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded library. */ + +#ifdef __cplusplus +extern "C" { +#endif + +// call this first +void rpython_startup_code(void); + +// pypy_init_threads has to be called in case you want to use threads +void pypy_init_threads(void); + +/* Initialize the home directory of PyPy. 
It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. + */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD + */ +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + +/* a similar function, but inside Python code it'll register + a magic argument c_argument as int, which will be passed as void* from C. + Useful for passing pointers to arbitrary structs that contain callbacks + to register */ +int pypy_execute_source_ptr(char *source, void* ptr); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII 
= re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -198,7 +213,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_audioop.py b/lib-python/2.7/test/test_audioop.py --- a/lib-python/2.7/test/test_audioop.py +++ b/lib-python/2.7/test/test_audioop.py @@ -2,7 +2,7 @@ import sys import unittest import struct -from test.test_support import run_unittest +from test.test_support import run_unittest, impl_detail formats = { @@ -183,6 +183,7 @@ self.assertEqual(audioop.lin2lin(datas[4], 4, 2), packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1)) + @impl_detail(pypy=False) def test_adpcm2lin(self): self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None), (b'\x00\x00\x00\xff\x00\xff', (-179, 40))) @@ -197,6 +198,7 @@ self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None), (b'\0' * w * 10, (0, 0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): self.assertEqual(audioop.lin2adpcm(datas[1], 1, None), (b'\x07\x7f\x7f', (-221, 39))) @@ -210,6 +212,7 @@ self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None), (b'\0' * 5, (0, 0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(datas[1], 1), b'\xd5\x87\xa4\x24\xaa\x2a\x5a') @@ -218,6 +221,7 @@ self.assertEqual(audioop.lin2alaw(datas[4], 4), b'\xd5\x87\xa4\x24\xaa\x2a\x55') + @impl_detail(pypy=False) def test_alaw2lin(self): encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\ b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff' @@ -232,6 +236,7 @@ decoded = audioop.alaw2lin(encoded, w) self.assertEqual(audioop.lin2alaw(decoded, w), encoded) + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(datas[1], 1), b'\xff\xad\x8e\x0e\x80\x00\x67') @@ -240,6 +245,7 @@ self.assertEqual(audioop.lin2ulaw(datas[4], 4), b'\xff\xad\x8e\x0e\x80\x00\x7e') + @impl_detail(pypy=False) def test_ulaw2lin(self): encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\ b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff' @@ -354,6 +360,7 @@ self.assertRaises(audioop.error, audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392) + @impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -378,6 +385,7 @@ self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abcdefgh' state = None diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. 
In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): @@ -129,9 +129,13 @@ fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) + return else: - self.fail("expected os.tmpfile() to raise OSError") - return + if test_support.check_impl_detail(pypy=False): + self.fail("expected os.tmpfile() to raise OSError") + # on PyPy, os.tmpfile() uses the tempfile module + # anyway, so works even if we cannot write in root. + fp.close() else: # open() worked, therefore, tmpfile() should work. Close our # dummy file and proceed with the test as normal. diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -1018,7 +1018,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -338,7 +338,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -349,7 +349,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) def test_parallel_iteration(self): # Issue #16601: Restarting iteration over tarfile continued @@ -1333,7 +1333,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -340,22 +340,18 @@ if __debug__: self._note("%s.wait(): got it", self) else: - # Balancing act: We can't afford a pure busy loop, so we - # have to sleep; but if we sleep the whole timeout time, - # we'll be unresponsive. The scheme here sleeps very - # little at first, longer as time goes on, but never longer - # than 20 times per second (or the timeout time remaining). - endtime = _time() + timeout - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - gotit = waiter.acquire(0) - if gotit: - break - remaining = endtime - _time() - if remaining <= 0: - break - delay = min(delay * 2, remaining, .05) - _sleep(delay) + # PyPy patch: use _py3k_acquire() + if timeout > 0: + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. + waiter.acquire() + gotit = True + else: + gotit = waiter.acquire(False) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. 
lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. + if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -110,7 +110,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py'), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, 
buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). 
It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class 
sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -322,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -993,13 +1009,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1022,12 +1043,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1177,11 +1203,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + 
self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1194,7 +1228,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1313,7 +1347,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,546 @@ +import __builtin__ as builtins +import math +import struct +from fractions import gcd +from ctypes import create_string_buffer + + +_buffer = buffer + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = start + size + return struct.unpack_from(fmt, _buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: + return 0x7f + elif size == 1: + return 0xff + elif signed and size == 2: + return 0x7fff + elif size == 2: + return 0xffff + elif signed and size == 4: + return 0x7fffffff + elif size == 4: + return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: + return 0 + elif size == 1: + return -0x80 + elif size == 2: + return -0x8000 + elif size == 4: + return -0x80000000 + + +def _get_clipfn(size, signed=True): + maxval = _get_maxval(size, signed) + minval = _get_minval(size, signed) + return lambda val: builtins.max(min(val, maxval), minval) + + +def 
_overflow(val, size, signed=True): + minval = _get_minval(size, signed) + maxval = _get_maxval(size, signed) + if minval <= val <= maxval: + return val + + bits = size * 8 + if signed: + offset = 2**(bits-1) + return ((val + offset) % (2**bits)) - offset + else: + return val % (2**bits) + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + return _get_sample(cp, size, i) + + +def max(cp, size): + _check_params(len(cp), size) + + if len(cp) == 0: + return 0 + + return builtins.max(abs(sample) for sample in _get_samples(cp, size)) + + +def minmax(cp, size): + _check_params(len(cp), size) + + max_sample, min_sample = 0, 0 + for sample in _get_samples(cp, size): + max_sample = builtins.max(sample, max_sample) + min_sample = builtins.min(sample, min_sample) + + return min_sample, max_sample + + +def avg(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + return sum(_get_samples(cp, size)) / sample_count + + +def rms(cp, size): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + + sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) + return int(math.sqrt(sum_squares / sample_count)) + + +def _sum2(cp1, cp2, length): + size = 2 + return sum(getsample(cp1, size, i) * getsample(cp2, size, i) + for i in range(length)) + + +def findfit(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0 or len(cp2) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) < len(cp2): + raise error("First sample should be longer") + + len1 = _sample_count(cp1, size) + len2 = _sample_count(cp2, size) + + sum_ri_2 = _sum2(cp2, cp2, len2) + sum_aij_2 = _sum2(cp1, cp1, len2) + sum_aij_ri = _sum2(cp1, cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + best_result = result + best_i = 0 + + for i in range(1, len1 - len2 + 1): + aj_m1 = _get_sample(cp1, size, i - 1) + aj_lm1 = _get_sample(cp1, size, i + len2 - 1) + + sum_aij_2 += aj_lm1**2 - aj_m1**2 + sum_aij_ri = _sum2(_buffer(cp1)[i*size:], cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + if result < best_result: + best_result = result + best_i = i + + factor = _sum2(_buffer(cp1)[best_i*size:], cp2, len2) / sum_ri_2 + + return best_i, factor + + +def findfactor(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) != len(cp2): + raise error("Samples should be same size") + + sample_count = _sample_count(cp1, size) + + sum_ri_2 = _sum2(cp2, cp2, sample_count) + sum_aij_ri = _sum2(cp1, cp2, sample_count) + + return sum_aij_ri / sum_ri_2 + + +def findmax(cp, len2): + size = 2 + sample_count = _sample_count(cp, size) + + if len(cp) % 2 != 0: + raise error("Strings should be even-sized") + + if len2 < 0 or sample_count < len2: + raise error("Input sample should be longer") + + if sample_count == 0: + return 0 + + result = _sum2(cp, cp, len2) + best_result = result + best_i = 0 + + for i in range(1, sample_count - len2 + 1): + sample_leaving_window = getsample(cp, size, i - 1) + sample_entering_window = getsample(cp, size, i + len2 - 1) + + result -= sample_leaving_window**2 + result += sample_entering_window**2 + + if result > best_result: + best_result = result + best_i = i + + return best_i + + +def avgpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False 
+ prevextreme = None + avg = 0 + nextreme = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + avg += abs(prevval - prevextreme) + nextreme += 1 + + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + if nextreme == 0: + return 0 + + return avg / nextreme + + +def maxpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + max = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + extremediff = abs(prevval - prevextreme) + if extremediff > max: + max = extremediff + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + return max + + +def cross(cp, size): + _check_params(len(cp), size) + + crossings = 0 + last_sample = 0 + for sample in _get_samples(cp, size): + if sample <= 0 < last_sample or sample >= 0 > last_sample: + crossings += 1 + last_sample = sample + + return crossings + + +def mul(cp, size, factor): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = clip(int(sample * factor)) + _put_sample(result, size, i, sample) + + return result.raw + + +def tomono(cp, size, fac1, fac2): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) / 2) + + for i in range(0, sample_count, 2): + l_sample = getsample(cp, size, i) + r_sample = getsample(cp, size, i + 1) + + sample = (l_sample * fac1) + (r_sample * fac2) + sample = clip(sample) + + _put_sample(result, size, i / 2, sample) + + return result.raw + + +def tostereo(cp, size, fac1, fac2): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) * 2) + clip = _get_clipfn(size) + + for i in range(sample_count): + sample = _get_sample(cp, size, i) + + l_sample = clip(sample * fac1) + r_sample = clip(sample * fac2) + + _put_sample(result, size, i * 2, l_sample) + _put_sample(result, size, i * 2 + 1, r_sample) + + return result.raw + + +def add(cp1, cp2, size): + _check_params(len(cp1), size) + + if len(cp1) != len(cp2): + raise error("Lengths should be the same") + + clip = _get_clipfn(size) + sample_count = _sample_count(cp1, size) + result = create_string_buffer(len(cp1)) + + for i in range(sample_count): + sample1 = getsample(cp1, size, i) + sample2 = getsample(cp2, size, i) + + sample = clip(sample1 + sample2) + + _put_sample(result, size, i, sample) + + return result.raw + + +def bias(cp, size, bias): + _check_params(len(cp), size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = _overflow(sample + bias, size) + _put_sample(result, size, i, sample) + + return result.raw + + +def reverse(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp)) + for i, sample in enumerate(_get_samples(cp, size)): + _put_sample(result, size, sample_count - i - 1, sample) + + return result.raw + + +def lin2lin(cp, 
size, size2): + _check_params(len(cp), size) + _check_size(size2) + + if size == size2: + return cp + + new_len = (len(cp) / size) * size2 + + result = create_string_buffer(new_len) + + for i in range(_sample_count(cp, size)): + sample = _get_sample(cp, size, i) + if size < size2: + sample = sample << (4 * size2 / size) + elif size > size2: + sample = sample >> (4 * size / size2) + + sample = _overflow(sample, size2) + + _put_sample(result, size2, i, sample) + + return result.raw + + +def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0): + _check_params(len(cp), size) + if nchannels < 1: + raise error("# of channels should be >= 1") + + bytes_per_frame = size * nchannels + frame_count = len(cp) / bytes_per_frame + + if bytes_per_frame / nchannels != size: + raise OverflowError("width * nchannels too big for a C int") + + if weightA < 1 or weightB < 0: + raise error("weightA should be >= 1, weightB should be >= 0") + + if len(cp) % bytes_per_frame != 0: + raise error("not a whole number of frames") + + if inrate <= 0 or outrate <= 0: + raise error("sampling rate not > 0") + + d = gcd(inrate, outrate) + inrate /= d + outrate /= d + + prev_i = [0] * nchannels + cur_i = [0] * nchannels + + if state is None: + d = -outrate + else: + d, samps = state + + if len(samps) != nchannels: + raise error("illegal state argument") + + prev_i, cur_i = zip(*samps) + prev_i, cur_i = list(prev_i), list(cur_i) + + q = frame_count / inrate + ceiling = (q + 1) * outrate + nbytes = ceiling * bytes_per_frame + + result = create_string_buffer(nbytes) + + samples = _get_samples(cp, size) + out_i = 0 + while True: + while d < 0: + if frame_count == 0: + samps = zip(prev_i, cur_i) + retval = result.raw + + # slice off extra bytes + trim_index = (out_i * bytes_per_frame) - len(retval) + retval = _buffer(retval)[:trim_index] + + return (retval, (d, tuple(samps))) + + for chan in range(nchannels): + prev_i[chan] = cur_i[chan] + cur_i[chan] = next(samples) + + cur_i[chan] = ( + (weightA * cur_i[chan] + weightB * prev_i[chan]) + / (weightA + weightB) + ) + + frame_count -= 1 + d += outrate + + while d >= 0: + for chan in range(nchannels): + cur_o = ( + (prev_i[chan] * d + cur_i[chan] * (outrate - d)) + / outrate + ) + _put_sample(result, size, out_i, _overflow(cur_o, size)) + out_i += 1 + d -= inrate + + +def lin2ulaw(cp, size): + raise NotImplementedError() + + +def ulaw2lin(cp, size): + raise NotImplementedError() + + +def lin2alaw(cp, size): + raise NotImplementedError() + + +def alaw2lin(cp, size): + raise NotImplementedError() + + +def lin2adpcm(cp, size, state): + raise NotImplementedError() + + +def adpcm2lin(cp, size, state): + raise NotImplementedError() diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.2" +__version_info__ = (0, 8, 2) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,4 @@ -import types +import sys, types from .lock import allocate_lock try: @@ -88,18 +88,20 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. 
The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. """ if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override) + self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -129,11 +131,9 @@ cdecl = cdecl.encode('ascii') # type = self._parser.parse_type(cdecl) - if hasattr(type, 'as_function_pointer'): - really_a_function_type = True + really_a_function_type = type.is_raw_function + if really_a_function_type: type = type.as_function_pointer() - else: - really_a_function_type = False btype = self._get_cached_btype(type) result = btype, really_a_function_type self._parsed_types[key] = result @@ -349,6 +349,9 @@ errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") + def getwinerror(self, code=-1): + return self._backend.getwinerror(code) + def _pointer_to(self, ctype): from . import model with self._lock: @@ -386,22 +389,27 @@ return self._backend.from_handle(x) -def _make_ffi_library(ffi, libname, flags): - import os - name = libname +def _load_backend_lib(backend, name, flags): if name is None: - name = 'c' # on Posix only - backend = ffi._backend + if sys.platform != "win32": + return backend.load_library(None, flags) + name = "c" # Windows: load_library(None) fails, but this works + # (backward compatibility hack only) try: if '.' 
not in name and '/' not in name: raise OSError("library not found: %r" % (name,)) - backendlib = backend.load_library(name, flags) + return backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: raise # propagate the original OSError - backendlib = backend.load_library(path, flags) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + import os + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) copied_enums = [] # def make_accessor_locked(name): diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -720,7 +720,7 @@ return self._new_struct_or_union('union', name, ctypes.Union) def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, sflags=0): if totalsize >= 0 or totalalignment >= 0: raise NotImplementedError("the ctypes backend of CFFI does not support " "structures completed by verify(); please " @@ -739,6 +739,8 @@ else: cfields.append((fname, BField._ctype, bitsize)) bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 struct_or_union._fields_ = cfields CTypesStructOrUnion._bfield_types = bfield_types # diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -98,6 +98,7 @@ self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._override = False + self._packed = False def _parse(self, csource): csource, macros = _preprocess(csource) @@ -147,13 +148,16 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False): + def parse(self, csource, override=False, packed=False): prev_override = self._override + prev_packed = self._packed try: self._override = override + self._packed = packed self._internal_parse(csource) finally: self._override = prev_override + self._packed = prev_packed def _internal_parse(self, csource): ast, macros = self._parse(csource) @@ -476,6 +480,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) + tp.packed = self._packed return tp def _make_partial(self, tp, nested): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,7 +1,12 @@ +import types import weakref +from .lock import allocate_lock + + class BaseTypeByIdentity(object): is_array_type = False + is_raw_function = False def get_c_name(self, replace_with='', context='a C file'): result = self.c_name_with_marker @@ -78,29 +83,29 @@ 'long': 'i', 'long long': 'i', 'signed char': 'i', - 'unsigned char': 'u', - 'unsigned short': 'u', - 'unsigned int': 'u', - 'unsigned long': 'u', - 'unsigned long long': 'u', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', - '_Bool': 'u', + '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', - 'uint8_t': 'u', + 'uint8_t': 'i', 'int16_t': 'i', - 'uint16_t': 'u', + 'uint16_t': 'i', 'int32_t': 'i', - 'uint32_t': 'u', + 'uint32_t': 'i', 'int64_t': 'i', - 'uint64_t': 'u', + 'uint64_t': 'i', 'intptr_t': 'i', - 'uintptr_t': 'u', + 'uintptr_t': 'i', 'ptrdiff_t': 'i', - 'size_t': 'u', + 
'size_t': 'i', 'ssize_t': 'i', } @@ -111,12 +116,8 @@ def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_signed_type(self): + def is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_unsigned_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'u' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] in 'iu' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' @@ -146,6 +147,7 @@ # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. _base_pattern = '(&)(%s)' + is_raw_function = True def build_backend_type(self, ffi, finishlist): from . import api @@ -212,8 +214,10 @@ self.item = item self.length = length # - if length is None or length == '...': + if length is None: brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' else: brackets = '&[%d]' % length self.c_name_with_marker = ( @@ -253,6 +257,7 @@ fixedlayout = None completed = False partial = False + packed = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -309,7 +314,11 @@ fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - ffi._backend.complete_struct_or_union(BType, lst, self) + sflags = 0 + if self.packed: + sflags = 8 # SF_PACKED + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, sflags) # else: fldtypes = [] @@ -448,6 +457,9 @@ tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) + +global_lock = allocate_lock() + def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds @@ -459,8 +471,7 @@ # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class level if ffi._backend # is some instance. - ModuleType = type(weakref) - if isinstance(ffi._backend, ModuleType): + if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() @@ -468,8 +479,17 @@ res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%r: %s" % (srctype, e)) - ffi._backend.__typecache[key] = res - return res + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._backend.__typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -160,7 +160,10 @@ def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() - module._cffi_setup(lst, ffiplatform.VerificationError, library) + if module._cffi_setup(lst, ffiplatform.VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. 
This will perform # the final adjustments, like copying the Python->C wrapper @@ -211,10 +214,7 @@ extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': - if tp.is_signed_type(): - converter = '_cffi_to_c_SIGNED' - else: - converter = '_cffi_to_c_UNSIGNED' + converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) @@ -267,10 +267,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type(): From noreply at buildbot.pypy.org Sat Mar 1 21:35:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 21:35:20 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.5: operationerrfmt -> oefmt Message-ID: <20140301203520.AC23F1C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.5 Changeset: r69576:e257da05e295 Date: 2014-03-01 15:34 -0500 http://bitbucket.org/pypy/pypy/changeset/e257da05e295/ Log: operationerrfmt -> oefmt diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1,7 +1,7 @@ import sys from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, generic_new_descr, interp_attrproperty, @@ -331,7 +331,7 @@ def check_decoded(space, w_decoded): if not space.isinstance_w(w_decoded, space.w_unicode): msg = "decoder should return a string result, not '%T'" - raise operationerrfmt(space.w_TypeError, msg, w_decoded) + raise oefmt(space.w_TypeError, msg, w_decoded) class W_TextIOWrapper(W_TextIOBase): @@ -557,7 +557,7 @@ if not space.isinstance_w(w_input, space.w_str): msg = "decoder getstate() should have returned a bytes " \ "object not '%T'" - raise operationerrfmt(space.w_TypeError, msg, w_input) + raise oefmt(space.w_TypeError, msg, w_input) eof = space.len_w(w_input) == 0 w_decoded = space.call_method(self.w_decoder, "decode", @@ -719,7 +719,7 @@ if not space.isinstance_w(w_text, space.w_unicode): msg = "unicode argument expected, got '%T'" - raise operationerrfmt(space.w_TypeError, msg, w_text) + raise oefmt(space.w_TypeError, msg, w_text) text = space.unicode_w(w_text) textlen = len(text) @@ -868,7 +868,7 @@ if not space.isinstance_w(w_chunk, space.w_str): msg = "underlying read() should have returned " \ "a bytes object, not '%T'" - raise operationerrfmt(space.w_TypeError, msg, w_chunk) + raise oefmt(space.w_TypeError, msg, w_chunk) self.snapshot = PositionSnapshot(cookie.dec_flags, space.str_w(w_chunk)) From noreply at buildbot.pypy.org Sat Mar 1 22:24:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 22:24:07 +0100 (CET) Subject: [pypy-commit] pypy vendor/stdlib: update python 2 stdlib to 2.7.6 Message-ID: <20140301212407.4EC131C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: vendor/stdlib Changeset: r69577:964879152e2e Date: 2014-03-01 15:44 -0500 http://bitbucket.org/pypy/pypy/changeset/964879152e2e/ Log: update python 2 stdlib to 2.7.6 diff too long, truncating to 2000 out of 10381 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -105,18 +105,17 @@ def run_cgi(self): 
"""Execute a CGI script.""" - path = self.path dir, rest = self.cgi_info - i = path.find('/', len(dir) + 1) + i = rest.find('/') while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] + nextdir = rest[:i] + nextrest = rest[i+1:] scriptdir = self.translate_path(nextdir) if os.path.isdir(scriptdir): dir, rest = nextdir, nextrest - i = path.find('/', len(dir) + 1) + i = rest.find('/') else: break diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -238,7 +238,7 @@ # a two-way quoting algorithm. Any non-text character is translated # into a 4 character sequence: a forward-slash followed by the # three-digit octal equivalent of the character. Any '\' or '"' is -# quoted with a preceeding '\' slash. +# quoted with a preceding '\' slash. # # These are taken from RFC2068 and RFC2109. # _LegalChars is the list of chars which don't require "'s diff --git a/lib-python/2.7/Queue.py b/lib-python/2.7/Queue.py --- a/lib-python/2.7/Queue.py +++ b/lib-python/2.7/Queue.py @@ -109,7 +109,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until a free slot is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Full exception if no free slot was available within that time. Otherwise ('block' is false), put an item on the queue if a free slot is immediately available, else raise the Full exception ('timeout' @@ -125,7 +125,7 @@ while self._qsize() == self.maxsize: self.not_full.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while self._qsize() == self.maxsize: @@ -152,7 +152,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until an item is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Empty exception if no item was available within that time. Otherwise ('block' is false), return an item if one is immediately available, else raise the Empty exception ('timeout' is ignored @@ -167,7 +167,7 @@ while not self._qsize(): self.not_empty.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while not self._qsize(): diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -149,6 +149,8 @@ # abandon query parameters path = path.split('?',1)[0] path = path.split('#',1)[0] + # Don't forget explicit trailing slash when normalizing. 
Issue17324 + trailing_slash = path.rstrip().endswith('/') path = posixpath.normpath(urllib.unquote(path)) words = path.split('/') words = filter(None, words) @@ -158,6 +160,8 @@ head, word = os.path.split(word) if word in (os.curdir, os.pardir): continue path = os.path.join(path, word) + if trailing_slash: + path += '/' return path def copyfile(self, source, outputfile): diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -53,7 +53,7 @@ def _read_output(commandstring): - """Output from succesful command execution or None""" + """Output from successful command execution or None""" # Similar to os.popen(commandstring, "r").read(), # but without actually using os.popen because that # function is not usable during python bootstrap. @@ -68,7 +68,7 @@ with contextlib.closing(fp) as fp: cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) - return fp.read().decode('utf-8').strip() if not os.system(cmd) else None + return fp.read().strip() if not os.system(cmd) else None def _find_build_tool(toolname): @@ -152,7 +152,7 @@ # are not installed. # # Futhermore, the compiler that can be used varies between - # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' + # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. @@ -192,7 +192,7 @@ if cc != oldcc: # Found a replacement compiler. - # Modify config vars using new compiler, if not already explictly + # Modify config vars using new compiler, if not already explicitly # overriden by an env variable, preserving additional arguments. for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: @@ -235,13 +235,19 @@ if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: # NOTE: Cannot use subprocess here because of bootstrap # issues when building Python itself - status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%( - _config_vars['CC'].replace("'", "'\"'\"'"),)) - # The Apple compiler drivers return status 255 if no PPC - if (status >> 8) == 255: - # Compiler doesn't support PPC, remove the related - # '-arch' flags if not explicitly overridden by an - # environment variable + status = os.system( + """echo 'int main{};' | """ + """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null""" + %(_config_vars['CC'].replace("'", "'\"'\"'"),)) + if status: + # The compile failed for some reason. Because of differences + # across Xcode and compiler versions, there is no reliable way + # to be sure why it failed. Assume here it was due to lack of + # PPC support and remove the related '-arch' flags from each + # config variables not explicitly overriden by an environment + # variable. If the error was for some other reason, we hope the + # failure will show up again when trying to compile an extension + # module. for cv in _UNIVERSAL_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] @@ -274,7 +280,7 @@ # compile an extension using an SDK that is not present # on the current machine it is better to not use an SDK # than to fail. This is particularly important with - # the standalong Command Line Tools alternative to a + # the standalone Command Line Tools alternative to a # full-blown Xcode install since the CLT packages do not # provide SDKs. 
If the SDK is not present, it is assumed # that the header files and dev libs have been installed @@ -378,7 +384,7 @@ compilers are present, i.e. when installing pure Python dists. Customization of compiler paths and detection of unavailable archs is deferred - until the first extention module build is + until the first extension module build is requested (in distutils.sysconfig.customize_compiler). Currently called from distutils.sysconfig diff --git a/lib-python/2.7/_strptime.py b/lib-python/2.7/_strptime.py --- a/lib-python/2.7/_strptime.py +++ b/lib-python/2.7/_strptime.py @@ -222,7 +222,7 @@ """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This - prevents the possibility of a match occuring for a value that also + prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -171,6 +171,12 @@ return NotImplemented return self.data == set(ref(item) for item in other) + def __ne__(self, other): + opposite = self.__eq__(other) + if opposite is NotImplemented: + return NotImplemented + return not opposite + def symmetric_difference(self, other): newset = self.copy() newset.symmetric_difference_update(other) diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -123,7 +123,7 @@ compression type, and then write audio frames using writeframesraw. When all frames have been written, either call writeframes('') or close() to patch up the sizes in the header. -Marks can be added anytime. If there are any marks, ypu must call +Marks can be added anytime. If there are any marks, you must call close() after all frames have been written. The close() method is called automatically when the class instance is destroyed. 
@@ -480,31 +480,30 @@ pass else: self._convert = self._adpcm2lin - self._framesize = self._framesize // 4 + self._sampwidth = 2 return # for ULAW and ALAW try Compression Library try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._ulaw2lin - self._framesize = self._framesize // 2 + self._sampwidth = 2 return except ImportError: pass raise Error, 'cannot read compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - self._framesize = self._framesize // 2 - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW - self._framesize = self._framesize // 2 else: raise Error, 'unsupported compression type' self._decomp = cl.OpenDecompressor(scheme) self._convert = self._decomp_data + self._sampwidth = 2 else: self._comptype = 'NONE' self._compname = 'not compressed' @@ -655,7 +654,7 @@ def setcomptype(self, comptype, compname): if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self._comptype = comptype self._compname = compname @@ -675,7 +674,7 @@ nchannels, sampwidth, framerate, nframes, comptype, compname = info if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self.setnchannels(nchannels) self.setsampwidth(sampwidth) @@ -804,7 +803,7 @@ try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._lin2ulaw @@ -812,9 +811,9 @@ except ImportError: pass raise Error, 'cannot write compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW else: raise Error, 'unsupported compression type' @@ -867,7 +866,10 @@ _write_short(self._file, self._nchannels) self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) - _write_short(self._file, self._sampwidth * 8) + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): + _write_short(self._file, 8) + else: + _write_short(self._file, self._sampwidth * 8) _write_float(self._file, self._framerate) if self._aifc: self._file.write(self._comptype) @@ -953,23 +955,27 @@ sys.argv.append('/usr/demos/data/audio/bach.aiff') fn = sys.argv[1] f = open(fn, 'r') - print "Reading", fn - print "nchannels =", f.getnchannels() - print "nframes =", f.getnframes() - print "sampwidth =", f.getsampwidth() - print "framerate =", f.getframerate() - print "comptype =", f.getcomptype() - print "compname =", f.getcompname() - if sys.argv[2:]: - gn = sys.argv[2] - print "Writing", gn - g = open(gn, 'w') - g.setparams(f.getparams()) - while 1: - data = f.readframes(1024) - if not data: - break - g.writeframes(data) - g.close() + try: + print "Reading", fn + print "nchannels =", f.getnchannels() + print "nframes =", f.getnframes() + print "sampwidth =", f.getsampwidth() + print "framerate =", f.getframerate() + print "comptype =", f.getcomptype() + print "compname =", f.getcompname() + if sys.argv[2:]: + gn = 
sys.argv[2] + print "Writing", gn + g = open(gn, 'w') + try: + g.setparams(f.getparams()) + while 1: + data = f.readframes(1024) + if not data: + break + g.writeframes(data) + finally: + g.close() + print "Done." + finally: f.close() - print "Done." diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -220,7 +220,7 @@ def yeardatescalendar(self, year, width=3): """ Return the data for the specified year ready for formatting. The return - value is a list of month rows. Each month row contains upto width months. + value is a list of month rows. Each month row contains up to width months. Each month contains between 4 and 6 weeks and each week contains 1-7 days. Days are datetime.date objects. """ diff --git a/lib-python/2.7/cgi.py b/lib-python/2.7/cgi.py --- a/lib-python/2.7/cgi.py +++ b/lib-python/2.7/cgi.py @@ -697,6 +697,9 @@ if not line: self.done = -1 break + if delim == "\r": + line = delim + line + delim = "" if line[:2] == "--" and last_line_lfend: strippedline = line.strip() if strippedline == next: @@ -713,6 +716,12 @@ delim = "\n" line = line[:-1] last_line_lfend = True + elif line[-1] == "\r": + # We may interrupt \r\n sequences if they span the 2**16 + # byte boundary + delim = "\r" + line = line[:-1] + last_line_lfend = False else: delim = "" last_line_lfend = False diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -270,6 +270,12 @@ 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) + __dict__ = _property(_asdict) + + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + pass + {field_defs} ''' @@ -363,7 +369,7 @@ result = namespace[typename] # For pickling to work, the __module__ variable needs to be set to the frame - # where the named tuple is created. Bypass this step in enviroments where + # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython). try: diff --git a/lib-python/2.7/compiler/pyassem.py b/lib-python/2.7/compiler/pyassem.py --- a/lib-python/2.7/compiler/pyassem.py +++ b/lib-python/2.7/compiler/pyassem.py @@ -125,7 +125,7 @@ # Make sure every block appears in dominators, even if no # other block must precede it. dominators.setdefault(b, set()) - # preceeding blocks dominate following blocks + # preceding blocks dominate following blocks for c in b.get_followers(): while 1: dominators.setdefault(c, set()).add(b) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -261,8 +261,9 @@ # if we see an extra quote between delimiters, we've got a # double quoted format - dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \ - {'delim':delim, 'quote':quotechar}, re.MULTILINE) + dq_regexp = re.compile( + r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \ + {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE) diff --git a/lib-python/2.7/ctypes/test/runtests.py b/lib-python/2.7/ctypes/test/runtests.py --- a/lib-python/2.7/ctypes/test/runtests.py +++ b/lib-python/2.7/ctypes/test/runtests.py @@ -2,7 +2,7 @@ Run all tests found in this directory, and print a summary of the results. 
Command line flags: - -q quiet mode: don't prnt anything while the tests are running + -q quiet mode: don't print anything while the tests are running -r run tests repeatedly, look for refcount leaks -u Add resources to the lits of allowed resources. '*' allows all diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -188,7 +188,7 @@ self.assertEqual(self._dll.tv_i(-42), None) self.assertEqual(self.S(), -42) -# The following repeates the above tests with stdcall functions (where +# The following repeats the above tests with stdcall functions (where # they are available) try: WinDLL diff --git a/lib-python/2.7/ctypes/test/test_integers.py b/lib-python/2.7/ctypes/test/test_integers.py --- a/lib-python/2.7/ctypes/test/test_integers.py +++ b/lib-python/2.7/ctypes/test/test_integers.py @@ -1,4 +1,4 @@ -# superseeded by test_numbers.py +# superseded by test_numbers.py import unittest if __name__ == '__main__': diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -212,7 +212,7 @@ def test_init(self): # c_int() can be initialized from Python's int, and c_int. - # Not from c_long or so, which seems strange, abd should + # Not from c_long or so, which seems strange, abc should # probably be changed: self.assertRaises(TypeError, c_int, c_long(42)) diff --git a/lib-python/2.7/ctypes/test/test_refcounts.py b/lib-python/2.7/ctypes/test/test_refcounts.py --- a/lib-python/2.7/ctypes/test/test_refcounts.py +++ b/lib-python/2.7/ctypes/test/test_refcounts.py @@ -41,7 +41,7 @@ # this is the standard refcount for func self.assertEqual(grc(func), 2) - # the CFuncPtr instance holds atr least one refcount on func: + # the CFuncPtr instance holds at least one refcount on func: f = OtherCallback(func) self.assertTrue(grc(func) > 2) @@ -58,7 +58,7 @@ x = X() x.a = OtherCallback(func) - # the CFuncPtr instance holds atr least one refcount on func: + # the CFuncPtr instance holds at least one refcount on func: self.assertTrue(grc(func) > 2) # and may release it again @@ -71,7 +71,7 @@ f = OtherCallback(func) - # the CFuncPtr instance holds atr least one refcount on func: + # the CFuncPtr instance holds at least one refcount on func: self.assertTrue(grc(func) > 2) # create a cycle diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -108,7 +108,7 @@ def test_emtpy(self): # I had problems with these # - # Although these are patological cases: Empty Structures! + # Although these are pathological cases: Empty Structures! 
class X(Structure): _fields_ = [] diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -93,7 +93,7 @@ fdout, ccout = tempfile.mkstemp() os.close(fdout) cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - '$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name try: f = os.popen(cmd) try: diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -25,7 +25,7 @@ and IEEE standard 854-1987: - www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html + http://en.wikipedia.org/wiki/IEEE_854-1987 Decimal floating point has finite precision with arbitrarily large bounds. diff --git a/lib-python/2.7/difflib.py b/lib-python/2.7/difflib.py --- a/lib-python/2.7/difflib.py +++ b/lib-python/2.7/difflib.py @@ -586,7 +586,7 @@ def get_grouped_opcodes(self, n=3): """ Isolate change clusters by eliminating ranges with no changes. - Return a generator of groups with upto n lines of context. + Return a generator of groups with up to n lines of context. Each group is in the same format as returned by get_opcodes(). >>> from pprint import pprint @@ -1361,7 +1361,7 @@ linejunk -- passed on to ndiff (see ndiff documentation) charjunk -- passed on to ndiff (see ndiff documentation) - This function returns an interator which returns a tuple: + This function returns an iterator which returns a tuple: (from line tuple, to line tuple, boolean flag) from/to line tuple -- (line num, line text) @@ -1963,7 +1963,7 @@ self._make_prefix() # change tabs to spaces before it gets more difficult after we insert - # markkup + # markup fromlines,tolines = self._tab_newline_replace(fromlines,tolines) # create diffs iterator which generates side by side from/to data diff --git a/lib-python/2.7/distutils/__init__.py b/lib-python/2.7/distutils/__init__.py --- a/lib-python/2.7/distutils/__init__.py +++ b/lib-python/2.7/distutils/__init__.py @@ -15,5 +15,5 @@ # Updated automatically by the Python release process. # #--start constants-- -__version__ = "2.7.5" +__version__ = "2.7.6" #--end constants-- diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -231,12 +231,10 @@ # building python standard extensions self.library_dirs.append('.') - # for extensions under Linux or Solaris with a shared Python library, + # For building extensions with a shared Python library, # Python's library directory must be appended to library_dirs - sysconfig.get_config_var('Py_ENABLE_SHARED') - if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu') - or sys.platform.startswith('sunos')) - and sysconfig.get_config_var('Py_ENABLE_SHARED')): + # See Issues: #1600860, #4366 + if (sysconfig.get_config_var('Py_ENABLE_SHARED')): if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) diff --git a/lib-python/2.7/distutils/command/sdist.py b/lib-python/2.7/distutils/command/sdist.py --- a/lib-python/2.7/distutils/command/sdist.py +++ b/lib-python/2.7/distutils/command/sdist.py @@ -183,7 +183,7 @@ depends on the user's options. 
""" # new behavior when using a template: - # the file list is recalculated everytime because + # the file list is recalculated every time because # even if MANIFEST.in or setup.py are not changed # the user might have added some files in the tree that # need to be included. diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -319,13 +319,18 @@ else: entry_point = '' - self.set_executables(compiler='gcc -mno-cygwin -O -Wall', - compiler_so='gcc -mno-cygwin -mdll -O -Wall', - compiler_cxx='g++ -mno-cygwin -O -Wall', - linker_exe='gcc -mno-cygwin', - linker_so='%s -mno-cygwin %s %s' - % (self.linker_dll, shared_option, - entry_point)) + if self.gcc_version < '4' or is_cygwingcc(): + no_cygwin = ' -mno-cygwin' + else: + no_cygwin = '' + + self.set_executables(compiler='gcc%s -O -Wall' % no_cygwin, + compiler_so='gcc%s -mdll -O -Wall' % no_cygwin, + compiler_cxx='g++%s -O -Wall' % no_cygwin, + linker_exe='gcc%s' % no_cygwin, + linker_so='%s%s %s %s' + % (self.linker_dll, no_cygwin, + shared_option, entry_point)) # Maybe we should also append -mthreads, but then the finished # dlls need another dll (mingwm10.dll see Mingw32 docs) # (-mthreads: Support thread-safe exception handling on `Mingw32') @@ -447,3 +452,12 @@ else: dllwrap_version = None return (gcc_version, ld_version, dllwrap_version) + +def is_cygwingcc(): + '''Try to determine if the gcc that would be used is from cygwin.''' + out = os.popen('gcc -dumpmachine', 'r') + out_string = out.read() + out.close() + # out_string is the target triplet cpu-vendor-os + # Cygwin's gcc sets the os to 'cygwin' + return out_string.strip().endswith('cygwin') diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -175,9 +175,15 @@ 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') - newcc = None if 'CC' in os.environ: - cc = os.environ['CC'] + newcc = os.environ['CC'] + if (sys.platform == 'darwin' + and 'LDSHARED' not in os.environ + and ldshared.startswith(cc)): + # On OS X, if CC is overridden, use that as the default + # command for LDSHARED as well + ldshared = newcc + ldshared[len(cc):] + cc = newcc if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: diff --git a/lib-python/2.7/distutils/tests/test_build_clib.py b/lib-python/2.7/distutils/tests/test_build_clib.py --- a/lib-python/2.7/distutils/tests/test_build_clib.py +++ b/lib-python/2.7/distutils/tests/test_build_clib.py @@ -77,7 +77,7 @@ cmd.compiler = FakeCompiler() - # build_libraries is also doing a bit of typoe checking + # build_libraries is also doing a bit of typo checking lib = [('name', {'sources': 'notvalid'})] self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib) diff --git a/lib-python/2.7/distutils/tests/test_cmd.py b/lib-python/2.7/distutils/tests/test_cmd.py --- a/lib-python/2.7/distutils/tests/test_cmd.py +++ b/lib-python/2.7/distutils/tests/test_cmd.py @@ -34,6 +34,18 @@ self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'not_string_list2') + cmd.option1 = 'ok,dok' + cmd.ensure_string_list('option1') + self.assertEqual(cmd.option1, ['ok', 'dok']) + + cmd.option2 = ['xxx', 'www'] + cmd.ensure_string_list('option2') + + cmd.option3 = ['ok', 2] + self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, + 'option3') + + def test_make_file(self): cmd = 
self.cmd @@ -77,19 +89,6 @@ cmd.option3 = 1 self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3') - def test_ensure_string_list(self): - cmd = self.cmd - cmd.option1 = 'ok,dok' - cmd.ensure_string_list('option1') - self.assertEqual(cmd.option1, ['ok', 'dok']) - - cmd.option2 = ['xxx', 'www'] - cmd.ensure_string_list('option2') - - cmd.option3 = ['ok', 2] - self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, - 'option3') - def test_ensure_filename(self): cmd = self.cmd cmd.option1 = __file__ diff --git a/lib-python/2.7/distutils/tests/test_unixccompiler.py b/lib-python/2.7/distutils/tests/test_unixccompiler.py --- a/lib-python/2.7/distutils/tests/test_unixccompiler.py +++ b/lib-python/2.7/distutils/tests/test_unixccompiler.py @@ -1,7 +1,8 @@ """Tests for distutils.unixccompiler.""" +import os import sys import unittest -from test.test_support import run_unittest +from test.test_support import EnvironmentVarGuard, run_unittest from distutils import sysconfig from distutils.unixccompiler import UnixCCompiler @@ -122,6 +123,37 @@ sysconfig.get_config_var = gcv self.assertEqual(self.cc.rpath_foo(), '-R/foo') + @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X') + def test_osx_cc_overrides_ldshared(self): + # Issue #18080: + # ensure that setting CC env variable also changes default linker + def gcv(v): + if v == 'LDSHARED': + return 'gcc-4.2 -bundle -undefined dynamic_lookup ' + return 'gcc-4.2' + sysconfig.get_config_var = gcv + with EnvironmentVarGuard() as env: + env['CC'] = 'my_cc' + del env['LDSHARED'] + sysconfig.customize_compiler(self.cc) + self.assertEqual(self.cc.linker_so[0], 'my_cc') + + @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X') + def test_osx_explict_ldshared(self): + # Issue #18080: + # ensure that setting CC env variable does not change + # explicit LDSHARED setting for linker + def gcv(v): + if v == 'LDSHARED': + return 'gcc-4.2 -bundle -undefined dynamic_lookup ' + return 'gcc-4.2' + sysconfig.get_config_var = gcv + with EnvironmentVarGuard() as env: + env['CC'] = 'my_cc' + env['LDSHARED'] = 'my_ld -bundle -dynamic' + sysconfig.customize_compiler(self.cc) + self.assertEqual(self.cc.linker_so[0], 'my_ld') + def test_suite(): return unittest.makeSuite(UnixCCompilerTestCase) diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -424,7 +424,7 @@ zero-based, with respect to the beginning of the DocTest. - indent: The example's indentation in the DocTest string. - I.e., the number of space characters that preceed the + I.e., the number of space characters that precede the example's first prompt. - options: A dictionary mapping from option flags to True or @@ -564,7 +564,7 @@ # Want consists of any non-blank lines that do not start with PS1. (?P (?:(?![ ]*$) # Not a blank line (?![ ]*>>>) # Not a line starting with PS1 - .*$\n? # But any other line + .+$\n? # But any other line )*) ''', re.MULTILINE | re.VERBOSE) @@ -895,7 +895,7 @@ if '__name__' not in globs: globs['__name__'] = '__main__' # provide a default module name - # Recursively expore `obj`, extracting DocTests. + # Recursively explore `obj`, extracting DocTests. 
tests = [] self._find(tests, obj, name, module, source_lines, globs, {}) # Sort the tests by alpha order of names, for consistency in diff --git a/lib-python/2.7/email/charset.py b/lib-python/2.7/email/charset.py --- a/lib-python/2.7/email/charset.py +++ b/lib-python/2.7/email/charset.py @@ -183,7 +183,7 @@ header encoding. Charset.SHORTEST is not allowed for body_encoding. - output_charset: Some character sets must be converted before the can be + output_charset: Some character sets must be converted before they can be used in email headers or bodies. If the input_charset is one of them, this attribute will contain the name of the charset output will be converted to. Otherwise, it will diff --git a/lib-python/2.7/fileinput.py b/lib-python/2.7/fileinput.py --- a/lib-python/2.7/fileinput.py +++ b/lib-python/2.7/fileinput.py @@ -90,12 +90,11 @@ def input(files=None, inplace=0, backup="", bufsize=0, mode="r", openhook=None): - """input([files[, inplace[, backup[, mode[, openhook]]]]]) + """Return an instance of the FileInput class, which can be iterated. - Create an instance of the FileInput class. The instance will be used - as global state for the functions of this module, and is also returned - to use during iteration. The parameters to this function will be passed - along to the constructor of the FileInput class. + The parameters are passed to the constructor of the FileInput class. + The returned instance, in addition to being an iterator, + keeps global state for the functions of this module,. """ global _state if _state and _state._file: @@ -182,7 +181,7 @@ return _state.isstdin() class FileInput: - """class FileInput([files[, inplace[, backup[, mode[, openhook]]]]]) + """FileInput([files[, inplace[, backup[, bufsize[, mode[, openhook]]]]]]) Class FileInput is the implementation of the module; its methods filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(), diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -55,6 +55,8 @@ # The standard FTP server control port FTP_PORT = 21 +# The sizehint parameter passed to readline() calls +MAXLINE = 8192 # Exception raised when an error or invalid response is received @@ -101,6 +103,7 @@ debugging = 0 host = '' port = FTP_PORT + maxline = MAXLINE sock = None file = None welcome = None @@ -180,7 +183,9 @@ # Internal: return one line from the server, stripping CRLF. 
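[Editorial sketch, not part of the commit: the MAXLINE constant and the maxline class attribute introduced in the ftplib hunk above cap every readline() call at 8192 bytes. A minimal illustration of how a caller could relax that cap for a server that legitimately sends longer lines; the host name is hypothetical, the attribute is simply the class default shown above.]

    from ftplib import FTP, Error

    ftp = FTP('ftp.example.com')        # hypothetical server
    ftp.login()
    ftp.maxline = 4 * 8192              # raise the per-line cap if needed
    try:
        ftp.retrlines('LIST')
    except Error as exc:                # raised when a reply line exceeds maxline
        print exc
    ftp.quit()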
# Raise EOFError if the connection is closed def getline(self): - line = self.file.readline() + line = self.file.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 1: print '*get*', self.sanitize(line) if not line: raise EOFError @@ -432,7 +437,9 @@ conn = self.transfercmd(cmd) fp = conn.makefile('rb') while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 2: print '*retr*', repr(line) if not line: break @@ -485,7 +492,9 @@ self.voidcmd('TYPE A') conn = self.transfercmd(cmd) while 1: - buf = fp.readline() + buf = fp.readline(self.maxline + 1) + if len(buf) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not buf: break if buf[-2:] != CRLF: if buf[-1] in CRLF: buf = buf[:-1] @@ -710,7 +719,9 @@ fp = conn.makefile('rb') try: while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 2: print '*retr*', repr(line) if not line: break @@ -748,7 +759,9 @@ conn = self.transfercmd(cmd) try: while 1: - buf = fp.readline() + buf = fp.readline(self.maxline + 1) + if len(buf) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not buf: break if buf[-2:] != CRLF: if buf[-1] in CRLF: buf = buf[:-1] @@ -905,7 +918,9 @@ fp = open(filename, "r") in_macro = 0 while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not line: break if in_macro and line.strip(): macro_lines.append(line) diff --git a/lib-python/2.7/genericpath.py b/lib-python/2.7/genericpath.py --- a/lib-python/2.7/genericpath.py +++ b/lib-python/2.7/genericpath.py @@ -22,7 +22,7 @@ # This follows symbolic links, so both islink() and isdir() can be true -# for the same path ono systems that support symlinks +# for the same path on systems that support symlinks def isfile(path): """Test whether a path is a regular file""" try: diff --git a/lib-python/2.7/heapq.py b/lib-python/2.7/heapq.py --- a/lib-python/2.7/heapq.py +++ b/lib-python/2.7/heapq.py @@ -366,6 +366,7 @@ ''' _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration + _len = len h = [] h_append = h.append @@ -377,17 +378,21 @@ pass heapify(h) - while 1: + while _len(h) > 1: try: while 1: - v, itnum, next = s = h[0] # raises IndexError when h is empty + v, itnum, next = s = h[0] yield v s[0] = next() # raises StopIteration when exhausted _heapreplace(h, s) # restore heap condition except _StopIteration: _heappop(h) # remove empty iterator - except IndexError: - return + if h: + # fast case when only a single iterator remains + v, itnum, next = h[0] + yield v + for v in next.__self__: + yield v # Extend the implementations of nsmallest and nlargest to use a key= argument _nsmallest = nsmallest diff --git a/lib-python/2.7/idlelib/AutoComplete.py b/lib-python/2.7/idlelib/AutoComplete.py --- a/lib-python/2.7/idlelib/AutoComplete.py +++ b/lib-python/2.7/idlelib/AutoComplete.py @@ -156,12 +156,9 @@ if not comp_lists[0]: return self.autocompletewindow = self._make_autocomplete_window() - self.autocompletewindow.show_window(comp_lists, - "insert-%dc" % len(comp_start), - complete, - mode, - userWantsWin) - return True + return not self.autocompletewindow.show_window( + comp_lists, "insert-%dc" % 
len(comp_start), + complete, mode, userWantsWin) def fetch_completions(self, what, mode): """Return a pair of lists of completions for something. The first list diff --git a/lib-python/2.7/idlelib/AutoCompleteWindow.py b/lib-python/2.7/idlelib/AutoCompleteWindow.py --- a/lib-python/2.7/idlelib/AutoCompleteWindow.py +++ b/lib-python/2.7/idlelib/AutoCompleteWindow.py @@ -157,13 +157,14 @@ self.start = self.widget.get(self.startindex, "insert") if complete: completed = self._complete_string(self.start) + start = self.start self._change_start(completed) i = self._binary_search(completed) if self.completions[i] == completed and \ (i == len(self.completions)-1 or self.completions[i+1][:len(completed)] != completed): # There is exactly one matching completion - return + return completed == start self.userwantswindow = userWantsWin self.lasttypedstart = self.start diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -15,7 +15,7 @@ menudefs = [ # underscore prefixes character to underscore ('file', [ - ('_New Window', '<>'), + ('_New File', '<>'), ('_Open...', '<>'), ('Open _Module...', '<>'), ('Class _Browser', '<>'), @@ -98,6 +98,10 @@ # menu del menudefs[-1][1][0:2] + # Remove the 'Configure' entry from the options menu, it is in the + # application menu as 'Preferences' + del menudefs[-2][1][0:2] + default_keydefs = idleConf.GetCurrentKeySet() del sys diff --git a/lib-python/2.7/idlelib/CallTips.py b/lib-python/2.7/idlelib/CallTips.py --- a/lib-python/2.7/idlelib/CallTips.py +++ b/lib-python/2.7/idlelib/CallTips.py @@ -163,7 +163,7 @@ if fob.func_code.co_flags & 0x8: items.append("***") arg_text = ", ".join(items) - arg_text = "(%s)" % re.sub("\.\d+", "", arg_text) + arg_text = "(%s)" % re.sub("(?", arg_text) # See if we can use the docstring doc = getattr(ob, "__doc__", "") if doc: @@ -223,4 +223,6 @@ tests = (t1, t2, t3, t4, t5, t6, t7, TC, tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6, tc.t7) - test(tests) + # test(tests) + from unittest import main + main('idlelib.idle_test.test_calltips', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/Delegator.py b/lib-python/2.7/idlelib/Delegator.py --- a/lib-python/2.7/idlelib/Delegator.py +++ b/lib-python/2.7/idlelib/Delegator.py @@ -4,30 +4,22 @@ def __init__(self, delegate=None): self.delegate = delegate - self.__cache = {} + self.__cache = set() def __getattr__(self, name): attr = getattr(self.delegate, name) # May raise AttributeError setattr(self, name, attr) - self.__cache[name] = attr + self.__cache.add(name) return attr def resetcache(self): - for key in self.__cache.keys(): + for key in self.__cache: try: delattr(self, key) except AttributeError: pass self.__cache.clear() - def cachereport(self): - keys = self.__cache.keys() - keys.sort() - print keys - def setdelegate(self, delegate): self.resetcache() self.delegate = delegate - - def getdelegate(self): - return self.delegate diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -346,6 +346,36 @@ self.askinteger = tkSimpleDialog.askinteger self.showerror = tkMessageBox.showerror + self._highlight_workaround() # Fix selection tags on Windows + + def _highlight_workaround(self): + # On Windows, Tk removes painting of the selection + # tags which is different behavior than on Linux and Mac. + # See issue14146 for more information. 
+ if not sys.platform.startswith('win'): + return + + text = self.text + text.event_add("<>", "") + text.event_add("<>", "") + def highlight_fix(focus): + sel_range = text.tag_ranges("sel") + if sel_range: + if focus == 'out': + HILITE_CONFIG = idleConf.GetHighlight( + idleConf.CurrentTheme(), 'hilite') + text.tag_config("sel_fix", HILITE_CONFIG) + text.tag_raise("sel_fix") + text.tag_add("sel_fix", *sel_range) + elif focus == 'in': + text.tag_remove("sel_fix", "1.0", "end") + + text.bind("<>", + lambda ev: highlight_fix("out")) + text.bind("<>", + lambda ev: highlight_fix("in")) + + def _filename_to_unicode(self, filename): """convert filename to unicode in order to display it in Tk""" if isinstance(filename, unicode) or not filename: @@ -437,7 +467,6 @@ ] if macosxSupport.runningAsOSXApp(): - del menu_specs[-3] menu_specs[-2] = ("windows", "_Window") @@ -660,7 +689,7 @@ # XXX Ought to insert current file's directory in front of path try: (f, file, (suffix, mode, type)) = _find_module(name) - except (NameError, ImportError), msg: + except (NameError, ImportError) as msg: tkMessageBox.showerror("Import error", str(msg), parent=self.text) return if type != imp.PY_SOURCE: @@ -804,7 +833,11 @@ menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1] for menubarItem in self.menudict.keys(): menu = self.menudict[menubarItem] - end = menu.index(END) + 1 + end = menu.index(END) + if end is None: + # Skip empty menus + continue + end += 1 for index in range(0, end): if menu.type(index) == 'command': accel = menu.entrycget(index, 'accelerator') @@ -861,11 +894,8 @@ "Load and update the recent files list and menus" rf_list = [] if os.path.exists(self.recent_files_path): - rf_list_file = open(self.recent_files_path,'r') - try: + with open(self.recent_files_path, 'r') as rf_list_file: rf_list = rf_list_file.readlines() - finally: - rf_list_file.close() if new_file: new_file = os.path.abspath(new_file) + '\n' if new_file in rf_list: diff --git a/lib-python/2.7/idlelib/FormatParagraph.py b/lib-python/2.7/idlelib/FormatParagraph.py --- a/lib-python/2.7/idlelib/FormatParagraph.py +++ b/lib-python/2.7/idlelib/FormatParagraph.py @@ -1,18 +1,19 @@ -# Extension to format a paragraph +"""Extension to format a paragraph or selection to a max width. -# Does basic, standard text formatting, and also understands Python -# comment blocks. Thus, for editing Python source code, this -# extension is really only suitable for reformatting these comment -# blocks or triple-quoted strings. +Does basic, standard text formatting, and also understands Python +comment blocks. Thus, for editing Python source code, this +extension is really only suitable for reformatting these comment +blocks or triple-quoted strings. -# Known problems with comment reformatting: -# * If there is a selection marked, and the first line of the -# selection is not complete, the block will probably not be detected -# as comments, and will have the normal "text formatting" rules -# applied. -# * If a comment block has leading whitespace that mixes tabs and -# spaces, they will not be considered part of the same block. -# * Fancy comments, like this bulleted list, arent handled :-) +Known problems with comment reformatting: +* If there is a selection marked, and the first line of the + selection is not complete, the block will probably not be detected + as comments, and will have the normal "text formatting" rules + applied. +* If a comment block has leading whitespace that mixes tabs and + spaces, they will not be considered part of the same block. 
+* Fancy comments, like this bulleted list, aren't handled :-) +""" import re from idlelib.configHandler import idleConf @@ -32,41 +33,31 @@ self.editwin = None def format_paragraph_event(self, event): - maxformatwidth = int(idleConf.GetOption('main','FormatParagraph', - 'paragraph', type='int')) + """Formats paragraph to a max width specified in idleConf. + + If text is selected, format_paragraph_event will start breaking lines + at the max width, starting from the beginning selection. + + If no text is selected, format_paragraph_event uses the current + cursor location to determine the paragraph (lines of text surrounded + by blank lines) and formats it. + """ + maxformatwidth = idleConf.GetOption( + 'main', 'FormatParagraph', 'paragraph', type='int') text = self.editwin.text first, last = self.editwin.get_selection_indices() if first and last: data = text.get(first, last) - comment_header = '' + comment_header = get_comment_header(data) else: first, last, comment_header, data = \ find_paragraph(text, text.index("insert")) if comment_header: - # Reformat the comment lines - convert to text sans header. - lines = data.split("\n") - lines = map(lambda st, l=len(comment_header): st[l:], lines) - data = "\n".join(lines) - # Reformat to maxformatwidth chars or a 20 char width, whichever is greater. - format_width = max(maxformatwidth - len(comment_header), 20) - newdata = reformat_paragraph(data, format_width) - # re-split and re-insert the comment header. - newdata = newdata.split("\n") - # If the block ends in a \n, we dont want the comment - # prefix inserted after it. (Im not sure it makes sense to - # reformat a comment block that isnt made of complete - # lines, but whatever!) Can't think of a clean solution, - # so we hack away - block_suffix = "" - if not newdata[-1]: - block_suffix = "\n" - newdata = newdata[:-1] - builder = lambda item, prefix=comment_header: prefix+item - newdata = '\n'.join(map(builder, newdata)) + block_suffix + newdata = reformat_comment(data, maxformatwidth, comment_header) else: - # Just a normal text format newdata = reformat_paragraph(data, maxformatwidth) text.tag_remove("sel", "1.0", "end") + if newdata != data: text.mark_set("insert", first) text.undo_block_start() @@ -79,31 +70,44 @@ return "break" def find_paragraph(text, mark): + """Returns the start/stop indices enclosing the paragraph that mark is in. + + Also returns the comment format string, if any, and paragraph of text + between the start/stop indices. 
+ """ lineno, col = map(int, mark.split(".")) - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) + + # Look for start of next paragraph if the index passed in is a blank line while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line): lineno = lineno + 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) first_lineno = lineno comment_header = get_comment_header(line) comment_header_len = len(comment_header) + + # Once start line found, search for end of paragraph (a blank line) while get_comment_header(line)==comment_header and \ not is_all_white(line[comment_header_len:]): lineno = lineno + 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) last = "%d.0" % lineno - # Search back to beginning of paragraph + + # Search back to beginning of paragraph (first blank line before) lineno = first_lineno - 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) while lineno > 0 and \ get_comment_header(line)==comment_header and \ not is_all_white(line[comment_header_len:]): lineno = lineno - 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) first = "%d.0" % (lineno+1) + return first, last, comment_header, text.get(first, last) +# This should perhaps be replaced with textwrap.wrap def reformat_paragraph(data, limit): + """Return data reformatted to specified width (limit).""" lines = data.split("\n") i = 0 n = len(lines) @@ -126,7 +130,7 @@ if not word: continue # Can happen when line ends in whitespace if len((partial + word).expandtabs()) > limit and \ - partial != indent1: + partial != indent1: new.append(partial.rstrip()) partial = indent2 partial = partial + word + " " @@ -138,13 +142,50 @@ new.extend(lines[i:]) return "\n".join(new) +def reformat_comment(data, limit, comment_header): + """Return data reformatted to specified width with comment header.""" + + # Remove header from the comment lines + lc = len(comment_header) + data = "\n".join(line[lc:] for line in data.split("\n")) + # Reformat to maxformatwidth chars or a 20 char width, + # whichever is greater. + format_width = max(limit - len(comment_header), 20) + newdata = reformat_paragraph(data, format_width) + # re-split and re-insert the comment header. + newdata = newdata.split("\n") + # If the block ends in a \n, we dont want the comment prefix + # inserted after it. (Im not sure it makes sense to reformat a + # comment block that is not made of complete lines, but whatever!) + # Can't think of a clean solution, so we hack away + block_suffix = "" + if not newdata[-1]: + block_suffix = "\n" + newdata = newdata[:-1] + return '\n'.join(comment_header+line for line in newdata) + block_suffix + def is_all_white(line): + """Return True if line is empty or all whitespace.""" + return re.match(r"^\s*$", line) is not None def get_indent(line): - return re.match(r"^(\s*)", line).group() + """Return the initial space or tab indent of line.""" + return re.match(r"^([ \t]*)", line).group() def get_comment_header(line): - m = re.match(r"^(\s*#*)", line) + """Return string with leading whitespace and '#' from line or ''. + + A null return indicates that the line is not a comment line. A non- + null return, such as ' #', will be used to find the other lines of + a comment block with the same indent. 
+ """ + m = re.match(r"^([ \t]*#*)", line) if m is None: return "" return m.group(1) + +if __name__ == "__main__": + from test import support; support.use_resources = ['gui'] + import unittest + unittest.main('idlelib.idle_test.test_formatparagraph', + verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/GrepDialog.py b/lib-python/2.7/idlelib/GrepDialog.py --- a/lib-python/2.7/idlelib/GrepDialog.py +++ b/lib-python/2.7/idlelib/GrepDialog.py @@ -81,36 +81,24 @@ hits = 0 for fn in list: try: - f = open(fn) - except IOError, msg: + with open(fn) as f: + for lineno, line in enumerate(f, 1): + if line[-1:] == '\n': + line = line[:-1] + if prog.search(line): + sys.stdout.write("%s: %s: %s\n" % + (fn, lineno, line)) + hits += 1 + except IOError as msg: print msg - continue - lineno = 0 - while 1: - block = f.readlines(100000) - if not block: - break - for line in block: - lineno = lineno + 1 - if line[-1:] == '\n': - line = line[:-1] - if prog.search(line): - sys.stdout.write("%s: %s: %s\n" % (fn, lineno, line)) - hits = hits + 1 - if hits: - if hits == 1: - s = "" - else: - s = "s" - print "Found", hits, "hit%s." % s - print "(Hint: right-click to open locations.)" - else: - print "No hits." + print(("Hits found: %s\n" + "(Hint: right-click to open locations.)" + % hits) if hits else "No hits.") def findfiles(self, dir, base, rec): try: names = os.listdir(dir or os.curdir) - except os.error, msg: + except os.error as msg: print msg return [] list = [] @@ -131,3 +119,9 @@ if self.top: self.top.grab_release() self.top.withdraw() + +if __name__ == "__main__": + # A human test is a bit tricky since EditorWindow() imports this module. + # Hence Idle must be restarted after editing this file for a live test. + import unittest + unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -71,7 +71,7 @@ encoding = encoding.lower() -coding_re = re.compile("coding[:=]\s*([-\w_.]+)") +coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)') class EncodingMessage(SimpleDialog): "Inform user that an encoding declaration is needed." @@ -125,11 +125,12 @@ Raise LookupError if the encoding is declared but unknown. """ # Only consider the first two lines - str = str.split("\n")[:2] - str = "\n".join(str) - - match = coding_re.search(str) - if not match: + lst = str.split("\n", 2)[:2] + for line in lst: + match = coding_re.match(line) + if match is not None: + break + else: return None name = match.group(1) # Check whether the encoding is known @@ -248,10 +249,9 @@ try: # open the file in binary mode so that we can handle # end-of-line convention ourselves. 
- f = open(filename,'rb') - chars = f.read() - f.close() - except IOError, msg: + with open(filename, 'rb') as f: + chars = f.read() + except IOError as msg: tkMessageBox.showerror("I/O Error", str(msg), master=self.text) return False @@ -294,7 +294,7 @@ # Next look for coding specification try: enc = coding_spec(chars) - except LookupError, name: + except LookupError as name: tkMessageBox.showerror( title="Error loading the file", message="The encoding '%s' is not known to this Python "\ @@ -383,12 +383,10 @@ if self.eol_convention != "\n": chars = chars.replace("\n", self.eol_convention) try: - f = open(filename, "wb") - f.write(chars) - f.flush() - f.close() + with open(filename, "wb") as f: + f.write(chars) return True - except IOError, msg: + except IOError as msg: tkMessageBox.showerror("I/O Error", str(msg), master=self.text) return False @@ -408,7 +406,7 @@ try: enc = coding_spec(chars) failed = None - except LookupError, msg: + except LookupError as msg: failed = msg enc = None if enc: diff --git a/lib-python/2.7/idlelib/IdleHistory.py b/lib-python/2.7/idlelib/IdleHistory.py --- a/lib-python/2.7/idlelib/IdleHistory.py +++ b/lib-python/2.7/idlelib/IdleHistory.py @@ -1,81 +1,93 @@ +"Implement Idle Shell history mechanism with History class" + from idlelib.configHandler import idleConf class History: + ''' Implement Idle Shell history mechanism. - def __init__(self, text, output_sep = "\n"): + store - Store source statement (called from PyShell.resetoutput). + fetch - Fetch stored statement matching prefix already entered. + history_next - Bound to <> event (default Alt-N). + history_prev - Bound to <> event (default Alt-P). + ''' + def __init__(self, text): + '''Initialize data attributes and bind event methods. + + .text - Idle wrapper of tk Text widget, with .bell(). + .history - source statements, possibly with multiple lines. + .prefix - source already entered at prompt; filters history list. + .pointer - index into history. + .cyclic - wrap around history list (or not). + ''' self.text = text self.history = [] - self.history_prefix = None - self.history_pointer = None - self.output_sep = output_sep + self.prefix = None + self.pointer = None self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool") text.bind("<>", self.history_prev) text.bind("<>", self.history_next) def history_next(self, event): - self.history_do(0) + "Fetch later statement; start with ealiest if cyclic." + self.fetch(reverse=False) return "break" def history_prev(self, event): - self.history_do(1) + "Fetch earlier statement; start with most recent." + self.fetch(reverse=True) return "break" - def _get_source(self, start, end): - # Get source code from start index to end index. Lines in the - # text control may be separated by sys.ps2 . - lines = self.text.get(start, end).split(self.output_sep) - return "\n".join(lines) + def fetch(self, reverse): + '''Fetch statememt and replace current line in text widget. - def _put_source(self, where, source): - output = self.output_sep.join(source.split("\n")) - self.text.insert(where, output) - - def history_do(self, reverse): + Set prefix and pointer as needed for successive fetches. + Reset them to None, None when returning to the start line. + Sound bell when return to start line or cannot leave a line + because cyclic is False. 
+ ''' nhist = len(self.history) - pointer = self.history_pointer - prefix = self.history_prefix + pointer = self.pointer + prefix = self.prefix if pointer is not None and prefix is not None: if self.text.compare("insert", "!=", "end-1c") or \ - self._get_source("iomark", "end-1c") != self.history[pointer]: + self.text.get("iomark", "end-1c") != self.history[pointer]: pointer = prefix = None + self.text.mark_set("insert", "end-1c") # != after cursor move if pointer is None or prefix is None: - prefix = self._get_source("iomark", "end-1c") + prefix = self.text.get("iomark", "end-1c") if reverse: - pointer = nhist + pointer = nhist # will be decremented else: if self.cyclic: - pointer = -1 - else: + pointer = -1 # will be incremented + else: # abort history_next self.text.bell() return nprefix = len(prefix) while 1: - if reverse: - pointer = pointer - 1 - else: - pointer = pointer + 1 + pointer += -1 if reverse else 1 if pointer < 0 or pointer >= nhist: self.text.bell() - if not self.cyclic and pointer < 0: + if not self.cyclic and pointer < 0: # abort history_prev return else: - if self._get_source("iomark", "end-1c") != prefix: + if self.text.get("iomark", "end-1c") != prefix: self.text.delete("iomark", "end-1c") - self._put_source("iomark", prefix) + self.text.insert("iomark", prefix) pointer = prefix = None break item = self.history[pointer] if item[:nprefix] == prefix and len(item) > nprefix: self.text.delete("iomark", "end-1c") - self._put_source("iomark", item) + self.text.insert("iomark", item) break - self.text.mark_set("insert", "end-1c") self.text.see("insert") self.text.tag_remove("sel", "1.0", "end") - self.history_pointer = pointer - self.history_prefix = prefix + self.pointer = pointer + self.prefix = prefix - def history_store(self, source): + def store(self, source): + "Store Shell input statement into history list." source = source.strip() if len(source) > 2: # avoid duplicates @@ -84,5 +96,11 @@ except ValueError: pass self.history.append(source) - self.history_pointer = None - self.history_prefix = None + self.pointer = None + self.prefix = None + +if __name__ == "__main__": + from test import test_support as support + support.use_resources = ['gui'] + from unittest import main + main('idlelib.idle_test.test_idlehistory', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/PathBrowser.py b/lib-python/2.7/idlelib/PathBrowser.py --- a/lib-python/2.7/idlelib/PathBrowser.py +++ b/lib-python/2.7/idlelib/PathBrowser.py @@ -92,4 +92,5 @@ mainloop() if __name__ == "__main__": - main() + from unittest import main + main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/PyShell.py b/lib-python/2.7/idlelib/PyShell.py --- a/lib-python/2.7/idlelib/PyShell.py +++ b/lib-python/2.7/idlelib/PyShell.py @@ -50,35 +50,55 @@ # internal warnings to the console. ScriptBinding.check_syntax() will # temporarily redirect the stream to the shell window to display warnings when # checking user's code. -global warning_stream -warning_stream = sys.__stderr__ -try: - import warnings -except ImportError: - pass -else: - def idle_showwarning(message, category, filename, lineno, - file=None, line=None): - if file is None: - file = warning_stream - try: - file.write(warnings.formatwarning(message, category, filename, - lineno, line=line)) - except IOError: - pass ## file (probably __stderr__) is invalid, warning dropped. 
- warnings.showwarning = idle_showwarning - def idle_formatwarning(message, category, filename, lineno, line=None): - """Format warnings the IDLE way""" - s = "\nWarning (from warnings module):\n" - s += ' File \"%s\", line %s\n' % (filename, lineno) - if line is None: - line = linecache.getline(filename, lineno) - line = line.strip() - if line: - s += " %s\n" % line - s += "%s: %s\n>>> " % (category.__name__, message) - return s - warnings.formatwarning = idle_formatwarning +warning_stream = sys.__stderr__ # None, at least on Windows, if no console. +import warnings + +def idle_formatwarning(message, category, filename, lineno, line=None): + """Format warnings the IDLE way.""" + + s = "\nWarning (from warnings module):\n" + s += ' File \"%s\", line %s\n' % (filename, lineno) + if line is None: + line = linecache.getline(filename, lineno) + line = line.strip() + if line: + s += " %s\n" % line + s += "%s: %s\n" % (category.__name__, message) + return s + +def idle_showwarning( + message, category, filename, lineno, file=None, line=None): + """Show Idle-format warning (after replacing warnings.showwarning). + + The differences are the formatter called, the file=None replacement, + which can be None, the capture of the consequence AttributeError, + and the output of a hard-coded prompt. + """ + if file is None: + file = warning_stream + try: + file.write(idle_formatwarning( + message, category, filename, lineno, line=line)) + file.write(">>> ") + except (AttributeError, IOError): + pass # if file (probably __stderr__) is invalid, skip warning. + +_warnings_showwarning = None + +def capture_warnings(capture): + "Replace warning.showwarning with idle_showwarning, or reverse." + + global _warnings_showwarning + if capture: + if _warnings_showwarning is None: + _warnings_showwarning = warnings.showwarning + warnings.showwarning = idle_showwarning + else: + if _warnings_showwarning is not None: + warnings.showwarning = _warnings_showwarning + _warnings_showwarning = None + +capture_warnings(True) def extended_linecache_checkcache(filename=None, orig_checkcache=linecache.checkcache): @@ -370,6 +390,7 @@ self.port = PORT self.original_compiler_flags = self.compile.compiler.flags + _afterid = None rpcclt = None rpcpid = None @@ -409,7 +430,7 @@ try: self.rpcclt = MyRPCClient(addr) break - except socket.error, err: + except socket.error as err: pass else: self.display_port_binding_error() @@ -430,7 +451,7 @@ self.rpcclt.listening_sock.settimeout(10) try: self.rpcclt.accept() - except socket.timeout, err: + except socket.timeout as err: self.display_no_subprocess_error() return None self.rpcclt.register("console", self.tkconsole) @@ -465,7 +486,7 @@ self.spawn_subprocess() try: self.rpcclt.accept() - except socket.timeout, err: + except socket.timeout as err: self.display_no_subprocess_error() return None self.transfer_path(with_cwd=with_cwd) @@ -497,6 +518,8 @@ threading.Thread(target=self.__request_interrupt).start() def kill_subprocess(self): + if self._afterid is not None: + self.tkconsole.text.after_cancel(self._afterid) try: self.rpcclt.close() except AttributeError: # no socket @@ -569,8 +592,8 @@ pass # Reschedule myself if not self.tkconsole.closing: - self.tkconsole.text.after(self.tkconsole.pollinterval, - self.poll_subprocess) + self._afterid = self.tkconsole.text.after( + self.tkconsole.pollinterval, self.poll_subprocess) debugger = None @@ -844,7 +867,6 @@ ] if macosxSupport.runningAsOSXApp(): - del menu_specs[-3] menu_specs[-2] = ("windows", "_Window") @@ -988,10 +1010,6 @@ 
self.stop_readline() self.canceled = True self.closing = True - # Wait for poll_subprocess() rescheduling to stop - self.text.after(2 * self.pollinterval, self.close2) - - def close2(self): return EditorWindow.close(self) def _close(self): @@ -1260,7 +1278,7 @@ def resetoutput(self): source = self.text.get("iomark", "end-1c") if self.history: - self.history.history_store(source) + self.history.store(source) if self.text.get("end-2c") != "\n": self.text.insert("end-1c", "\n") self.text.mark_set("iomark", "end-1c") @@ -1430,6 +1448,7 @@ def main(): global flist, root, use_subprocess + capture_warnings(True) use_subprocess = True enable_shell = False enable_edit = False @@ -1439,7 +1458,7 @@ startup = False try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") - except getopt.error, msg: + except getopt.error as msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) @@ -1562,7 +1581,10 @@ while flist.inversedict: # keep IDLE running while files are open. root.mainloop() root.destroy() + capture_warnings(False) if __name__ == "__main__": sys.modules['PyShell'] = sys.modules['__main__'] main() + +capture_warnings(False) # Make sure turned off; see issue 18081 diff --git a/lib-python/2.7/idlelib/RstripExtension.py b/lib-python/2.7/idlelib/RstripExtension.py --- a/lib-python/2.7/idlelib/RstripExtension.py +++ b/lib-python/2.7/idlelib/RstripExtension.py @@ -1,13 +1,9 @@ 'Provides "Strip trailing whitespace" under the "Format" menu.' -__author__ = "Roger D. Serwy " - class RstripExtension: menudefs = [ - ('format', [None, - ('Strip trailing whitespace', '<>'), - ]),] + ('format', [None, ('Strip trailing whitespace', '<>'), ] ), ] def __init__(self, editwin): self.editwin = editwin @@ -20,10 +16,18 @@ undo.undo_block_start() - end_line = int(float(text.index('end'))) + 1 + end_line = int(float(text.index('end'))) for cur in range(1, end_line): - txt = text.get('%i.0' % cur, '%i.0 lineend' % cur) + txt = text.get('%i.0' % cur, '%i.end' % cur) + raw = len(txt) cut = len(txt.rstrip()) - text.delete('%i.%i' % (cur, cut), '%i.0 lineend' % cur) + # Since text.delete() marks file as changed, even if not, + # only call it when needed to actually delete something. + if cut < raw: + text.delete('%i.%i' % (cur, cut), '%i.end' % cur) undo.undo_block_stop() + +if __name__ == "__main__": + import unittest + unittest.main('idlelib.idle_test.test_rstrip', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/ScriptBinding.py b/lib-python/2.7/idlelib/ScriptBinding.py --- a/lib-python/2.7/idlelib/ScriptBinding.py +++ b/lib-python/2.7/idlelib/ScriptBinding.py @@ -70,13 +70,13 @@ f = open(filename, 'r') try: tabnanny.process_tokens(tokenize.generate_tokens(f.readline)) - except tokenize.TokenError, msg: + except tokenize.TokenError as msg: msgtxt, (lineno, start) = msg self.editwin.gotoline(lineno) self.errorbox("Tabnanny Tokenizing Error", "Token Error: %s" % msgtxt) return False - except tabnanny.NannyNag, nag: + except tabnanny.NannyNag as nag: # The error messages from tabnanny are too confusing... 
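[Editorial sketch, not part of the commit: the capture_warnings() helper added to PyShell above follows a general save/replace/restore pattern for warnings.showwarning. A stripped-down version of that pattern outside IDLE; the replacement hook here just swallows warnings and is purely illustrative.]

    import warnings

    _saved_showwarning = None

    def capture(on, hook=lambda *args, **kwargs: None):
        # Install `hook` as warnings.showwarning, or restore the original.
        global _saved_showwarning
        if on:
            if _saved_showwarning is None:
                _saved_showwarning = warnings.showwarning
                warnings.showwarning = hook
        elif _saved_showwarning is not None:
            warnings.showwarning = _saved_showwarning
            _saved_showwarning = None

    capture(True)
    warnings.warn("hidden")      # routed to `hook`, so nothing is printed
    capture(False)               # stock behaviour restored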
self.editwin.gotoline(nag.get_lineno()) self.errorbox("Tab/space error", indent_message) @@ -87,9 +87,8 @@ self.shell = shell = self.flist.open_shell() saved_stream = shell.get_warning_stream() shell.set_warning_stream(shell.stderr) - f = open(filename, 'r') - source = f.read() - f.close() + with open(filename, 'r') as f: + source = f.read() if '\r' in source: source = re.sub(r"\r\n", "\n", source) source = re.sub(r"\r", "\n", source) @@ -101,7 +100,7 @@ try: # If successful, return the compiled code return compile(source, filename, "exec") - except (SyntaxError, OverflowError, ValueError), err: + except (SyntaxError, OverflowError, ValueError) as err: try: msg, (errorfilename, lineno, offset, line) = err if not errorfilename: @@ -152,16 +151,16 @@ dirname = os.path.dirname(filename) # XXX Too often this discards arguments the user just set... interp.runcommand("""if 1: - _filename = %r + __file__ = {filename!r} import sys as _sys from os.path import basename as _basename if (not _sys.argv or - _basename(_sys.argv[0]) != _basename(_filename)): - _sys.argv = [_filename] + _basename(_sys.argv[0]) != _basename(__file__)): + _sys.argv = [__file__] import os as _os From noreply at buildbot.pypy.org Sat Mar 1 22:24:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 22:24:08 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.5: close branch for switch to 2.7.6 Message-ID: <20140301212408.670901C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.5 Changeset: r69578:e6485190533b Date: 2014-03-01 15:45 -0500 http://bitbucket.org/pypy/pypy/changeset/e6485190533b/ Log: close branch for switch to 2.7.6 From noreply at buildbot.pypy.org Sat Mar 1 22:24:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 22:24:10 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: merge stdlib 2.7.6 from vendor/stdlib Message-ID: <20140301212410.77C4F1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69579:cfd0f4c85cb2 Date: 2014-03-01 16:20 -0500 http://bitbucket.org/pypy/pypy/changeset/cfd0f4c85cb2/ Log: merge stdlib 2.7.6 from vendor/stdlib diff too long, truncating to 2000 out of 10531 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -105,18 +105,17 @@ def run_cgi(self): """Execute a CGI script.""" - path = self.path dir, rest = self.cgi_info - i = path.find('/', len(dir) + 1) + i = rest.find('/') while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] + nextdir = rest[:i] + nextrest = rest[i+1:] scriptdir = self.translate_path(nextdir) if os.path.isdir(scriptdir): dir, rest = nextdir, nextrest - i = path.find('/', len(dir) + 1) + i = rest.find('/') else: break diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -238,7 +238,7 @@ # a two-way quoting algorithm. Any non-text character is translated # into a 4 character sequence: a forward-slash followed by the # three-digit octal equivalent of the character. Any '\' or '"' is -# quoted with a preceeding '\' slash. +# quoted with a preceding '\' slash. # # These are taken from RFC2068 and RFC2109. 
# _LegalChars is the list of chars which don't require "'s diff --git a/lib-python/2.7/Queue.py b/lib-python/2.7/Queue.py --- a/lib-python/2.7/Queue.py +++ b/lib-python/2.7/Queue.py @@ -109,7 +109,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until a free slot is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Full exception if no free slot was available within that time. Otherwise ('block' is false), put an item on the queue if a free slot is immediately available, else raise the Full exception ('timeout' @@ -125,7 +125,7 @@ while self._qsize() == self.maxsize: self.not_full.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while self._qsize() == self.maxsize: @@ -152,7 +152,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until an item is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Empty exception if no item was available within that time. Otherwise ('block' is false), return an item if one is immediately available, else raise the Empty exception ('timeout' is ignored @@ -167,7 +167,7 @@ while not self._qsize(): self.not_empty.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while not self._qsize(): diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -149,6 +149,8 @@ # abandon query parameters path = path.split('?',1)[0] path = path.split('#',1)[0] + # Don't forget explicit trailing slash when normalizing. Issue17324 + trailing_slash = path.rstrip().endswith('/') path = posixpath.normpath(urllib.unquote(path)) words = path.split('/') words = filter(None, words) @@ -158,6 +160,8 @@ head, word = os.path.split(word) if word in (os.curdir, os.pardir): continue path = os.path.join(path, word) + if trailing_slash: + path += '/' return path def copyfile(self, source, outputfile): diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -53,7 +53,7 @@ def _read_output(commandstring): - """Output from succesful command execution or None""" + """Output from successful command execution or None""" # Similar to os.popen(commandstring, "r").read(), # but without actually using os.popen because that # function is not usable during python bootstrap. @@ -68,7 +68,7 @@ with contextlib.closing(fp) as fp: cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) - return fp.read().decode('utf-8').strip() if not os.system(cmd) else None + return fp.read().strip() if not os.system(cmd) else None def _find_build_tool(toolname): @@ -152,7 +152,7 @@ # are not installed. # # Futhermore, the compiler that can be used varies between - # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' + # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. 
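[Editorial sketch, not part of the commit: the Queue.py hunks above align the docstrings and error message with the actual check -- any non-negative timeout is accepted, only negative values are rejected. A small standard-library-only illustration of the clarified contract.]

    import Queue

    q = Queue.Queue(maxsize=1)
    q.put('a')                               # queue is now full
    try:
        q.put('b', block=True, timeout=0)    # zero is a legal (non-negative) timeout
    except Queue.Full:
        print 'full, gave up immediately'
    try:
        q.put('b', block=True, timeout=-1)   # negative timeouts are rejected
    except ValueError as exc:
        print exc                            # 'timeout' must be a non-negative number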
@@ -192,7 +192,7 @@ if cc != oldcc: # Found a replacement compiler. - # Modify config vars using new compiler, if not already explictly + # Modify config vars using new compiler, if not already explicitly # overriden by an env variable, preserving additional arguments. for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: @@ -235,13 +235,19 @@ if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: # NOTE: Cannot use subprocess here because of bootstrap # issues when building Python itself - status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%( - _config_vars['CC'].replace("'", "'\"'\"'"),)) - # The Apple compiler drivers return status 255 if no PPC - if (status >> 8) == 255: - # Compiler doesn't support PPC, remove the related - # '-arch' flags if not explicitly overridden by an - # environment variable + status = os.system( + """echo 'int main{};' | """ + """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null""" + %(_config_vars['CC'].replace("'", "'\"'\"'"),)) + if status: + # The compile failed for some reason. Because of differences + # across Xcode and compiler versions, there is no reliable way + # to be sure why it failed. Assume here it was due to lack of + # PPC support and remove the related '-arch' flags from each + # config variables not explicitly overriden by an environment + # variable. If the error was for some other reason, we hope the + # failure will show up again when trying to compile an extension + # module. for cv in _UNIVERSAL_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] @@ -274,7 +280,7 @@ # compile an extension using an SDK that is not present # on the current machine it is better to not use an SDK # than to fail. This is particularly important with - # the standalong Command Line Tools alternative to a + # the standalone Command Line Tools alternative to a # full-blown Xcode install since the CLT packages do not # provide SDKs. If the SDK is not present, it is assumed # that the header files and dev libs have been installed @@ -378,7 +384,7 @@ compilers are present, i.e. when installing pure Python dists. Customization of compiler paths and detection of unavailable archs is deferred - until the first extention module build is + until the first extension module build is requested (in distutils.sysconfig.customize_compiler). Currently called from distutils.sysconfig diff --git a/lib-python/2.7/_strptime.py b/lib-python/2.7/_strptime.py --- a/lib-python/2.7/_strptime.py +++ b/lib-python/2.7/_strptime.py @@ -222,7 +222,7 @@ """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This - prevents the possibility of a match occuring for a value that also + prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). 
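[Editorial sketch, not part of the commit: the _osx_support hunk above now decides whether to strip '-arch ppc' flags by attempting a trivial compile rather than relying on a specific exit code. The probe reduces to the command below; 'cc' stands in for whatever _config_vars['CC'] holds, and the command is only meaningful on OS X with developer tools installed.]

    import os

    # Non-zero status means the toolchain cannot target ppc, so the
    # '-arch ppc'/'-arch ppc64' flags get dropped from the config vars.
    status = os.system(
        "echo 'int main{};' | "
        "cc -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null")
    print 'ppc toolchain available' if status == 0 else 'ppc not supported'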
diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -171,6 +171,12 @@ return NotImplemented return self.data == set(ref(item) for item in other) + def __ne__(self, other): + opposite = self.__eq__(other) + if opposite is NotImplemented: + return NotImplemented + return not opposite + def symmetric_difference(self, other): newset = self.copy() newset.symmetric_difference_update(other) diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -123,7 +123,7 @@ compression type, and then write audio frames using writeframesraw. When all frames have been written, either call writeframes('') or close() to patch up the sizes in the header. -Marks can be added anytime. If there are any marks, ypu must call +Marks can be added anytime. If there are any marks, you must call close() after all frames have been written. The close() method is called automatically when the class instance is destroyed. @@ -480,31 +480,30 @@ pass else: self._convert = self._adpcm2lin - self._framesize = self._framesize // 4 + self._sampwidth = 2 return # for ULAW and ALAW try Compression Library try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._ulaw2lin - self._framesize = self._framesize // 2 + self._sampwidth = 2 return except ImportError: pass raise Error, 'cannot read compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - self._framesize = self._framesize // 2 - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW - self._framesize = self._framesize // 2 else: raise Error, 'unsupported compression type' self._decomp = cl.OpenDecompressor(scheme) self._convert = self._decomp_data + self._sampwidth = 2 else: self._comptype = 'NONE' self._compname = 'not compressed' @@ -655,7 +654,7 @@ def setcomptype(self, comptype, compname): if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self._comptype = comptype self._compname = compname @@ -675,7 +674,7 @@ nchannels, sampwidth, framerate, nframes, comptype, compname = info if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self.setnchannels(nchannels) self.setsampwidth(sampwidth) @@ -804,7 +803,7 @@ try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._lin2ulaw @@ -812,9 +811,9 @@ except ImportError: pass raise Error, 'cannot write compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW else: raise Error, 'unsupported compression type' @@ -867,7 +866,10 @@ _write_short(self._file, self._nchannels) self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) - _write_short(self._file, self._sampwidth * 8) + if self._comptype in ('ULAW', 
'ulaw', 'ALAW', 'alaw', 'G722'): + _write_short(self._file, 8) + else: + _write_short(self._file, self._sampwidth * 8) _write_float(self._file, self._framerate) if self._aifc: self._file.write(self._comptype) @@ -953,23 +955,27 @@ sys.argv.append('/usr/demos/data/audio/bach.aiff') fn = sys.argv[1] f = open(fn, 'r') - print "Reading", fn - print "nchannels =", f.getnchannels() - print "nframes =", f.getnframes() - print "sampwidth =", f.getsampwidth() - print "framerate =", f.getframerate() - print "comptype =", f.getcomptype() - print "compname =", f.getcompname() - if sys.argv[2:]: - gn = sys.argv[2] - print "Writing", gn - g = open(gn, 'w') - g.setparams(f.getparams()) - while 1: - data = f.readframes(1024) - if not data: - break - g.writeframes(data) - g.close() + try: + print "Reading", fn + print "nchannels =", f.getnchannels() + print "nframes =", f.getnframes() + print "sampwidth =", f.getsampwidth() + print "framerate =", f.getframerate() + print "comptype =", f.getcomptype() + print "compname =", f.getcompname() + if sys.argv[2:]: + gn = sys.argv[2] + print "Writing", gn + g = open(gn, 'w') + try: + g.setparams(f.getparams()) + while 1: + data = f.readframes(1024) + if not data: + break + g.writeframes(data) + finally: + g.close() + print "Done." + finally: f.close() - print "Done." diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -220,7 +220,7 @@ def yeardatescalendar(self, year, width=3): """ Return the data for the specified year ready for formatting. The return - value is a list of month rows. Each month row contains upto width months. + value is a list of month rows. Each month row contains up to width months. Each month contains between 4 and 6 weeks and each week contains 1-7 days. Days are datetime.date objects. """ diff --git a/lib-python/2.7/cgi.py b/lib-python/2.7/cgi.py --- a/lib-python/2.7/cgi.py +++ b/lib-python/2.7/cgi.py @@ -697,6 +697,9 @@ if not line: self.done = -1 break + if delim == "\r": + line = delim + line + delim = "" if line[:2] == "--" and last_line_lfend: strippedline = line.strip() if strippedline == next: @@ -713,6 +716,12 @@ delim = "\n" line = line[:-1] last_line_lfend = True + elif line[-1] == "\r": + # We may interrupt \r\n sequences if they span the 2**16 + # byte boundary + delim = "\r" + line = line[:-1] + last_line_lfend = False else: delim = "" last_line_lfend = False diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -275,6 +275,12 @@ 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) + __dict__ = _property(_asdict) + + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + pass + {field_defs} ''' @@ -371,7 +377,7 @@ result = namespace[typename] # For pickling to work, the __module__ variable needs to be set to the frame - # where the named tuple is created. Bypass this step in enviroments where + # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython). try: diff --git a/lib-python/2.7/compiler/pyassem.py b/lib-python/2.7/compiler/pyassem.py --- a/lib-python/2.7/compiler/pyassem.py +++ b/lib-python/2.7/compiler/pyassem.py @@ -125,7 +125,7 @@ # Make sure every block appears in dominators, even if no # other block must precede it. 
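[Editorial sketch, not part of the commit: the collections.py hunk above gives the namedtuple template a __dict__ property backed by _asdict() and a __getstate__ that keeps the OrderedDict out of pickles. A short illustration of the visible effect; Point is an arbitrary example type.]

    from collections import namedtuple
    import pickle

    Point = namedtuple('Point', 'x y')
    p = Point(1, 2)

    print vars(p)                          # OrderedDict([('x', 1), ('y', 2)])
    roundtripped = pickle.loads(pickle.dumps(p))
    print roundtripped                     # Point(x=1, y=2), no OrderedDict baggage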
dominators.setdefault(b, set()) - # preceeding blocks dominate following blocks + # preceding blocks dominate following blocks for c in b.get_followers(): while 1: dominators.setdefault(c, set()).add(b) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -261,8 +261,9 @@ # if we see an extra quote between delimiters, we've got a # double quoted format - dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \ - {'delim':delim, 'quote':quotechar}, re.MULTILINE) + dq_regexp = re.compile( + r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \ + {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE) diff --git a/lib-python/2.7/ctypes/test/runtests.py b/lib-python/2.7/ctypes/test/runtests.py --- a/lib-python/2.7/ctypes/test/runtests.py +++ b/lib-python/2.7/ctypes/test/runtests.py @@ -2,7 +2,7 @@ Run all tests found in this directory, and print a summary of the results. Command line flags: - -q quiet mode: don't prnt anything while the tests are running + -q quiet mode: don't print anything while the tests are running -r run tests repeatedly, look for refcount leaks -u Add resources to the lits of allowed resources. '*' allows all diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -191,7 +191,7 @@ self.assertEqual(self._dll.tv_i(-42), None) self.assertEqual(self.S(), -42) -# The following repeates the above tests with stdcall functions (where +# The following repeats the above tests with stdcall functions (where # they are available) try: WinDLL diff --git a/lib-python/2.7/ctypes/test/test_integers.py b/lib-python/2.7/ctypes/test/test_integers.py --- a/lib-python/2.7/ctypes/test/test_integers.py +++ b/lib-python/2.7/ctypes/test/test_integers.py @@ -1,4 +1,4 @@ -# superseeded by test_numbers.py +# superseded by test_numbers.py import unittest if __name__ == '__main__': diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -216,7 +216,7 @@ def test_init(self): # c_int() can be initialized from Python's int, and c_int. 
- # Not from c_long or so, which seems strange, abd should + # Not from c_long or so, which seems strange, abc should # probably be changed: self.assertRaises(TypeError, c_int, c_long(42)) diff --git a/lib-python/2.7/ctypes/test/test_refcounts.py b/lib-python/2.7/ctypes/test/test_refcounts.py --- a/lib-python/2.7/ctypes/test/test_refcounts.py +++ b/lib-python/2.7/ctypes/test/test_refcounts.py @@ -47,7 +47,7 @@ # this is the standard refcount for func self.assertEqual(grc(func), 2) - # the CFuncPtr instance holds atr least one refcount on func: + # the CFuncPtr instance holds at least one refcount on func: f = OtherCallback(func) self.assertTrue(grc(func) > 2) @@ -64,7 +64,7 @@ x = X() x.a = OtherCallback(func) - # the CFuncPtr instance holds atr least one refcount on func: + # the CFuncPtr instance holds at least one refcount on func: self.assertTrue(grc(func) > 2) # and may release it again @@ -77,7 +77,7 @@ f = OtherCallback(func) - # the CFuncPtr instance holds atr least one refcount on func: + # the CFuncPtr instance holds at least one refcount on func: self.assertTrue(grc(func) > 2) # create a cycle diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -108,7 +108,7 @@ def test_emtpy(self): # I had problems with these # - # Although these are patological cases: Empty Structures! + # Although these are pathological cases: Empty Structures! class X(Structure): _fields_ = [] diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -93,7 +93,7 @@ fdout, ccout = tempfile.mkstemp() os.close(fdout) cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - '$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name try: f = os.popen(cmd) try: diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -25,7 +25,7 @@ and IEEE standard 854-1987: - www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html + http://en.wikipedia.org/wiki/IEEE_854-1987 Decimal floating point has finite precision with arbitrarily large bounds. diff --git a/lib-python/2.7/difflib.py b/lib-python/2.7/difflib.py --- a/lib-python/2.7/difflib.py +++ b/lib-python/2.7/difflib.py @@ -586,7 +586,7 @@ def get_grouped_opcodes(self, n=3): """ Isolate change clusters by eliminating ranges with no changes. - Return a generator of groups with upto n lines of context. + Return a generator of groups with up to n lines of context. Each group is in the same format as returned by get_opcodes(). 
>>> from pprint import pprint @@ -1361,7 +1361,7 @@ linejunk -- passed on to ndiff (see ndiff documentation) charjunk -- passed on to ndiff (see ndiff documentation) - This function returns an interator which returns a tuple: + This function returns an iterator which returns a tuple: (from line tuple, to line tuple, boolean flag) from/to line tuple -- (line num, line text) @@ -1963,7 +1963,7 @@ self._make_prefix() # change tabs to spaces before it gets more difficult after we insert - # markkup + # markup fromlines,tolines = self._tab_newline_replace(fromlines,tolines) # create diffs iterator which generates side by side from/to data diff --git a/lib-python/2.7/distutils/__init__.py b/lib-python/2.7/distutils/__init__.py --- a/lib-python/2.7/distutils/__init__.py +++ b/lib-python/2.7/distutils/__init__.py @@ -15,5 +15,5 @@ # Updated automatically by the Python release process. # #--start constants-- -__version__ = "2.7.5" +__version__ = "2.7.6" #--end constants-- diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -241,12 +241,10 @@ # building python standard extensions self.library_dirs.append('.') - # for extensions under Linux or Solaris with a shared Python library, + # For building extensions with a shared Python library, # Python's library directory must be appended to library_dirs - sysconfig.get_config_var('Py_ENABLE_SHARED') - if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu') - or sys.platform.startswith('sunos')) - and sysconfig.get_config_var('Py_ENABLE_SHARED')): + # See Issues: #1600860, #4366 + if (sysconfig.get_config_var('Py_ENABLE_SHARED')): if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) diff --git a/lib-python/2.7/distutils/command/sdist.py b/lib-python/2.7/distutils/command/sdist.py --- a/lib-python/2.7/distutils/command/sdist.py +++ b/lib-python/2.7/distutils/command/sdist.py @@ -183,7 +183,7 @@ depends on the user's options. """ # new behavior when using a template: - # the file list is recalculated everytime because + # the file list is recalculated every time because # even if MANIFEST.in or setup.py are not changed # the user might have added some files in the tree that # need to be included. 
diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -322,13 +322,18 @@ else: entry_point = '' - self.set_executables(compiler='gcc -mno-cygwin -O -Wall', - compiler_so='gcc -mno-cygwin -mdll -O -Wall', - compiler_cxx='g++ -mno-cygwin -O -Wall', - linker_exe='gcc -mno-cygwin', - linker_so='%s -mno-cygwin %s %s' - % (self.linker_dll, shared_option, - entry_point)) + if self.gcc_version < '4' or is_cygwingcc(): + no_cygwin = ' -mno-cygwin' + else: + no_cygwin = '' + + self.set_executables(compiler='gcc%s -O -Wall' % no_cygwin, + compiler_so='gcc%s -mdll -O -Wall' % no_cygwin, + compiler_cxx='g++%s -O -Wall' % no_cygwin, + linker_exe='gcc%s' % no_cygwin, + linker_so='%s%s %s %s' + % (self.linker_dll, no_cygwin, + shared_option, entry_point)) # Maybe we should also append -mthreads, but then the finished # dlls need another dll (mingwm10.dll see Mingw32 docs) # (-mthreads: Support thread-safe exception handling on `Mingw32') @@ -450,3 +455,12 @@ else: dllwrap_version = None return (gcc_version, ld_version, dllwrap_version) + +def is_cygwingcc(): + '''Try to determine if the gcc that would be used is from cygwin.''' + out = os.popen('gcc -dumpmachine', 'r') + out_string = out.read() + out.close() + # out_string is the target triplet cpu-vendor-os + # Cygwin's gcc sets the os to 'cygwin' + return out_string.strip().endswith('cygwin') diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -1,4 +1,3 @@ - # The content of this file is redirected from # sysconfig_cpython or sysconfig_pypy. 
# All underscore names are imported too, because @@ -11,5 +10,3 @@ else: from distutils import sysconfig_cpython as _sysconfig_module globals().update(_sysconfig_module.__dict__) - -_USE_CLANG = None diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py --- a/lib-python/2.7/distutils/sysconfig_cpython.py +++ b/lib-python/2.7/distutils/sysconfig_cpython.py @@ -175,9 +175,15 @@ 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') - newcc = None if 'CC' in os.environ: - cc = os.environ['CC'] + newcc = os.environ['CC'] + if (sys.platform == 'darwin' + and 'LDSHARED' not in os.environ + and ldshared.startswith(cc)): + # On OS X, if CC is overridden, use that as the default + # command for LDSHARED as well + ldshared = newcc + ldshared[len(cc):] + cc = newcc if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: diff --git a/lib-python/2.7/distutils/tests/test_build_clib.py b/lib-python/2.7/distutils/tests/test_build_clib.py --- a/lib-python/2.7/distutils/tests/test_build_clib.py +++ b/lib-python/2.7/distutils/tests/test_build_clib.py @@ -77,7 +77,7 @@ cmd.compiler = FakeCompiler() - # build_libraries is also doing a bit of typoe checking + # build_libraries is also doing a bit of typo checking lib = [('name', {'sources': 'notvalid'})] self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib) diff --git a/lib-python/2.7/distutils/tests/test_cmd.py b/lib-python/2.7/distutils/tests/test_cmd.py --- a/lib-python/2.7/distutils/tests/test_cmd.py +++ b/lib-python/2.7/distutils/tests/test_cmd.py @@ -34,6 +34,18 @@ self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, 'not_string_list2') + cmd.option1 = 'ok,dok' + cmd.ensure_string_list('option1') + self.assertEqual(cmd.option1, ['ok', 'dok']) + + cmd.option2 = ['xxx', 'www'] + cmd.ensure_string_list('option2') + + cmd.option3 = ['ok', 2] + self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, + 'option3') + + def test_make_file(self): cmd = self.cmd @@ -77,19 +89,6 @@ cmd.option3 = 1 self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3') - def test_ensure_string_list(self): - cmd = self.cmd - cmd.option1 = 'ok,dok' - cmd.ensure_string_list('option1') - self.assertEqual(cmd.option1, ['ok', 'dok']) - - cmd.option2 = ['xxx', 'www'] - cmd.ensure_string_list('option2') - - cmd.option3 = ['ok', 2] - self.assertRaises(DistutilsOptionError, cmd.ensure_string_list, - 'option3') - def test_ensure_filename(self): cmd = self.cmd cmd.option1 = __file__ diff --git a/lib-python/2.7/distutils/tests/test_unixccompiler.py b/lib-python/2.7/distutils/tests/test_unixccompiler.py --- a/lib-python/2.7/distutils/tests/test_unixccompiler.py +++ b/lib-python/2.7/distutils/tests/test_unixccompiler.py @@ -1,7 +1,8 @@ """Tests for distutils.unixccompiler.""" +import os import sys import unittest -from test.test_support import run_unittest +from test.test_support import EnvironmentVarGuard, run_unittest from distutils import sysconfig from distutils.unixccompiler import UnixCCompiler @@ -122,6 +123,37 @@ sysconfig.get_config_var = gcv self.assertEqual(self.cc.rpath_foo(), '-R/foo') + @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X') + def test_osx_cc_overrides_ldshared(self): + # Issue #18080: + # ensure that setting CC env variable also changes default linker + def gcv(v): + if v == 'LDSHARED': + return 'gcc-4.2 -bundle -undefined dynamic_lookup ' + return 'gcc-4.2' + sysconfig.get_config_var = gcv + with EnvironmentVarGuard() as env: + 
env['CC'] = 'my_cc' + del env['LDSHARED'] + sysconfig.customize_compiler(self.cc) + self.assertEqual(self.cc.linker_so[0], 'my_cc') + + @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X') + def test_osx_explict_ldshared(self): + # Issue #18080: + # ensure that setting CC env variable does not change + # explicit LDSHARED setting for linker + def gcv(v): + if v == 'LDSHARED': + return 'gcc-4.2 -bundle -undefined dynamic_lookup ' + return 'gcc-4.2' + sysconfig.get_config_var = gcv + with EnvironmentVarGuard() as env: + env['CC'] = 'my_cc' + env['LDSHARED'] = 'my_ld -bundle -dynamic' + sysconfig.customize_compiler(self.cc) + self.assertEqual(self.cc.linker_so[0], 'my_ld') + def test_suite(): return unittest.makeSuite(UnixCCompilerTestCase) diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -424,7 +424,7 @@ zero-based, with respect to the beginning of the DocTest. - indent: The example's indentation in the DocTest string. - I.e., the number of space characters that preceed the + I.e., the number of space characters that precede the example's first prompt. - options: A dictionary mapping from option flags to True or @@ -564,7 +564,7 @@ # Want consists of any non-blank lines that do not start with PS1. (?P (?:(?![ ]*$) # Not a blank line (?![ ]*>>>) # Not a line starting with PS1 - .*$\n? # But any other line + .+$\n? # But any other line )*) ''', re.MULTILINE | re.VERBOSE) @@ -895,7 +895,7 @@ if '__name__' not in globs: globs['__name__'] = '__main__' # provide a default module name - # Recursively expore `obj`, extracting DocTests. + # Recursively explore `obj`, extracting DocTests. tests = [] self._find(tests, obj, name, module, source_lines, globs, {}) # Sort the tests by alpha order of names, for consistency in diff --git a/lib-python/2.7/email/charset.py b/lib-python/2.7/email/charset.py --- a/lib-python/2.7/email/charset.py +++ b/lib-python/2.7/email/charset.py @@ -183,7 +183,7 @@ header encoding. Charset.SHORTEST is not allowed for body_encoding. - output_charset: Some character sets must be converted before the can be + output_charset: Some character sets must be converted before they can be used in email headers or bodies. If the input_charset is one of them, this attribute will contain the name of the charset output will be converted to. Otherwise, it will diff --git a/lib-python/2.7/fileinput.py b/lib-python/2.7/fileinput.py --- a/lib-python/2.7/fileinput.py +++ b/lib-python/2.7/fileinput.py @@ -90,12 +90,11 @@ def input(files=None, inplace=0, backup="", bufsize=0, mode="r", openhook=None): - """input([files[, inplace[, backup[, mode[, openhook]]]]]) + """Return an instance of the FileInput class, which can be iterated. - Create an instance of the FileInput class. The instance will be used - as global state for the functions of this module, and is also returned - to use during iteration. The parameters to this function will be passed - along to the constructor of the FileInput class. + The parameters are passed to the constructor of the FileInput class. + The returned instance, in addition to being an iterator, + keeps global state for the functions of this module,. 
""" global _state if _state and _state._file: @@ -182,7 +181,7 @@ return _state.isstdin() class FileInput: - """class FileInput([files[, inplace[, backup[, mode[, openhook]]]]]) + """FileInput([files[, inplace[, backup[, bufsize[, mode[, openhook]]]]]]) Class FileInput is the implementation of the module; its methods filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(), diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -55,6 +55,8 @@ # The standard FTP server control port FTP_PORT = 21 +# The sizehint parameter passed to readline() calls +MAXLINE = 8192 # Exception raised when an error or invalid response is received @@ -101,6 +103,7 @@ debugging = 0 host = '' port = FTP_PORT + maxline = MAXLINE sock = None file = None welcome = None @@ -180,7 +183,9 @@ # Internal: return one line from the server, stripping CRLF. # Raise EOFError if the connection is closed def getline(self): - line = self.file.readline() + line = self.file.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 1: print '*get*', self.sanitize(line) if not line: raise EOFError @@ -432,7 +437,9 @@ conn = self.transfercmd(cmd) fp = conn.makefile('rb') while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 2: print '*retr*', repr(line) if not line: break @@ -485,7 +492,9 @@ self.voidcmd('TYPE A') conn = self.transfercmd(cmd) while 1: - buf = fp.readline() + buf = fp.readline(self.maxline + 1) + if len(buf) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not buf: break if buf[-2:] != CRLF: if buf[-1] in CRLF: buf = buf[:-1] @@ -710,7 +719,9 @@ fp = conn.makefile('rb') try: while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 2: print '*retr*', repr(line) if not line: break @@ -748,7 +759,9 @@ conn = self.transfercmd(cmd) try: while 1: - buf = fp.readline() + buf = fp.readline(self.maxline + 1) + if len(buf) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not buf: break if buf[-2:] != CRLF: if buf[-1] in CRLF: buf = buf[:-1] @@ -905,7 +918,9 @@ fp = open(filename, "r") in_macro = 0 while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not line: break if in_macro and line.strip(): macro_lines.append(line) diff --git a/lib-python/2.7/genericpath.py b/lib-python/2.7/genericpath.py --- a/lib-python/2.7/genericpath.py +++ b/lib-python/2.7/genericpath.py @@ -22,7 +22,7 @@ # This follows symbolic links, so both islink() and isdir() can be true -# for the same path ono systems that support symlinks +# for the same path on systems that support symlinks def isfile(path): """Test whether a path is a regular file""" try: diff --git a/lib-python/2.7/heapq.py b/lib-python/2.7/heapq.py --- a/lib-python/2.7/heapq.py +++ b/lib-python/2.7/heapq.py @@ -366,6 +366,7 @@ ''' _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration + _len = len h = [] h_append = h.append @@ -377,17 +378,21 @@ pass heapify(h) - while 1: + while _len(h) > 1: try: while 1: - v, itnum, next = s = h[0] # raises IndexError when h is empty + v, itnum, next = s = h[0] yield v s[0] = next() 
# raises StopIteration when exhausted _heapreplace(h, s) # restore heap condition except _StopIteration: _heappop(h) # remove empty iterator - except IndexError: - return + if h: + # fast case when only a single iterator remains + v, itnum, next = h[0] + yield v + for v in next.__self__: + yield v # Extend the implementations of nsmallest and nlargest to use a key= argument _nsmallest = nsmallest diff --git a/lib-python/2.7/idlelib/AutoComplete.py b/lib-python/2.7/idlelib/AutoComplete.py --- a/lib-python/2.7/idlelib/AutoComplete.py +++ b/lib-python/2.7/idlelib/AutoComplete.py @@ -156,12 +156,9 @@ if not comp_lists[0]: return self.autocompletewindow = self._make_autocomplete_window() - self.autocompletewindow.show_window(comp_lists, - "insert-%dc" % len(comp_start), - complete, - mode, - userWantsWin) - return True + return not self.autocompletewindow.show_window( + comp_lists, "insert-%dc" % len(comp_start), + complete, mode, userWantsWin) def fetch_completions(self, what, mode): """Return a pair of lists of completions for something. The first list diff --git a/lib-python/2.7/idlelib/AutoCompleteWindow.py b/lib-python/2.7/idlelib/AutoCompleteWindow.py --- a/lib-python/2.7/idlelib/AutoCompleteWindow.py +++ b/lib-python/2.7/idlelib/AutoCompleteWindow.py @@ -157,13 +157,14 @@ self.start = self.widget.get(self.startindex, "insert") if complete: completed = self._complete_string(self.start) + start = self.start self._change_start(completed) i = self._binary_search(completed) if self.completions[i] == completed and \ (i == len(self.completions)-1 or self.completions[i+1][:len(completed)] != completed): # There is exactly one matching completion - return + return completed == start self.userwantswindow = userWantsWin self.lasttypedstart = self.start diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -15,7 +15,7 @@ menudefs = [ # underscore prefixes character to underscore ('file', [ - ('_New Window', '<>'), + ('_New File', '<>'), ('_Open...', '<>'), ('Open _Module...', '<>'), ('Class _Browser', '<>'), @@ -98,6 +98,10 @@ # menu del menudefs[-1][1][0:2] + # Remove the 'Configure' entry from the options menu, it is in the + # application menu as 'Preferences' + del menudefs[-2][1][0:2] + default_keydefs = idleConf.GetCurrentKeySet() del sys diff --git a/lib-python/2.7/idlelib/CallTips.py b/lib-python/2.7/idlelib/CallTips.py --- a/lib-python/2.7/idlelib/CallTips.py +++ b/lib-python/2.7/idlelib/CallTips.py @@ -163,7 +163,7 @@ if fob.func_code.co_flags & 0x8: items.append("***") arg_text = ", ".join(items) - arg_text = "(%s)" % re.sub("\.\d+", "", arg_text) + arg_text = "(%s)" % re.sub("(?", arg_text) # See if we can use the docstring doc = getattr(ob, "__doc__", "") if doc: @@ -223,4 +223,6 @@ tests = (t1, t2, t3, t4, t5, t6, t7, TC, tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6, tc.t7) - test(tests) + # test(tests) + from unittest import main + main('idlelib.idle_test.test_calltips', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/Delegator.py b/lib-python/2.7/idlelib/Delegator.py --- a/lib-python/2.7/idlelib/Delegator.py +++ b/lib-python/2.7/idlelib/Delegator.py @@ -4,12 +4,12 @@ def __init__(self, delegate=None): self.delegate = delegate - self.__cache = {} + self.__cache = set() def __getattr__(self, name): attr = getattr(self.delegate, name) # May raise AttributeError setattr(self, name, attr) - self.__cache[name] = attr + self.__cache.add(name) return attr def 
__nonzero__(self): @@ -21,21 +21,13 @@ return True def resetcache(self): - for key in self.__cache.keys(): + for key in self.__cache: try: delattr(self, key) except AttributeError: pass self.__cache.clear() - def cachereport(self): - keys = self.__cache.keys() - keys.sort() - print keys - def setdelegate(self, delegate): self.resetcache() self.delegate = delegate - - def getdelegate(self): - return self.delegate diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -346,6 +346,36 @@ self.askinteger = tkSimpleDialog.askinteger self.showerror = tkMessageBox.showerror + self._highlight_workaround() # Fix selection tags on Windows + + def _highlight_workaround(self): + # On Windows, Tk removes painting of the selection + # tags which is different behavior than on Linux and Mac. + # See issue14146 for more information. + if not sys.platform.startswith('win'): + return + + text = self.text + text.event_add("<>", "") + text.event_add("<>", "") + def highlight_fix(focus): + sel_range = text.tag_ranges("sel") + if sel_range: + if focus == 'out': + HILITE_CONFIG = idleConf.GetHighlight( + idleConf.CurrentTheme(), 'hilite') + text.tag_config("sel_fix", HILITE_CONFIG) + text.tag_raise("sel_fix") + text.tag_add("sel_fix", *sel_range) + elif focus == 'in': + text.tag_remove("sel_fix", "1.0", "end") + + text.bind("<>", + lambda ev: highlight_fix("out")) + text.bind("<>", + lambda ev: highlight_fix("in")) + + def _filename_to_unicode(self, filename): """convert filename to unicode in order to display it in Tk""" if isinstance(filename, unicode) or not filename: @@ -437,7 +467,6 @@ ] if macosxSupport.runningAsOSXApp(): - del menu_specs[-3] menu_specs[-2] = ("windows", "_Window") @@ -660,7 +689,7 @@ # XXX Ought to insert current file's directory in front of path try: (f, file, (suffix, mode, type)) = _find_module(name) - except (NameError, ImportError), msg: + except (NameError, ImportError) as msg: tkMessageBox.showerror("Import error", str(msg), parent=self.text) return if type != imp.PY_SOURCE: @@ -804,7 +833,11 @@ menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1] for menubarItem in self.menudict.keys(): menu = self.menudict[menubarItem] - end = menu.index(END) + 1 + end = menu.index(END) + if end is None: + # Skip empty menus + continue + end += 1 for index in range(0, end): if menu.type(index) == 'command': accel = menu.entrycget(index, 'accelerator') @@ -861,11 +894,8 @@ "Load and update the recent files list and menus" rf_list = [] if os.path.exists(self.recent_files_path): - rf_list_file = open(self.recent_files_path,'r') - try: + with open(self.recent_files_path, 'r') as rf_list_file: rf_list = rf_list_file.readlines() - finally: - rf_list_file.close() if new_file: new_file = os.path.abspath(new_file) + '\n' if new_file in rf_list: diff --git a/lib-python/2.7/idlelib/FormatParagraph.py b/lib-python/2.7/idlelib/FormatParagraph.py --- a/lib-python/2.7/idlelib/FormatParagraph.py +++ b/lib-python/2.7/idlelib/FormatParagraph.py @@ -1,18 +1,19 @@ -# Extension to format a paragraph +"""Extension to format a paragraph or selection to a max width. -# Does basic, standard text formatting, and also understands Python -# comment blocks. Thus, for editing Python source code, this -# extension is really only suitable for reformatting these comment -# blocks or triple-quoted strings. +Does basic, standard text formatting, and also understands Python +comment blocks. 
Thus, for editing Python source code, this +extension is really only suitable for reformatting these comment +blocks or triple-quoted strings. -# Known problems with comment reformatting: -# * If there is a selection marked, and the first line of the -# selection is not complete, the block will probably not be detected -# as comments, and will have the normal "text formatting" rules -# applied. -# * If a comment block has leading whitespace that mixes tabs and -# spaces, they will not be considered part of the same block. -# * Fancy comments, like this bulleted list, arent handled :-) +Known problems with comment reformatting: +* If there is a selection marked, and the first line of the + selection is not complete, the block will probably not be detected + as comments, and will have the normal "text formatting" rules + applied. +* If a comment block has leading whitespace that mixes tabs and + spaces, they will not be considered part of the same block. +* Fancy comments, like this bulleted list, aren't handled :-) +""" import re from idlelib.configHandler import idleConf @@ -32,41 +33,31 @@ self.editwin = None def format_paragraph_event(self, event): - maxformatwidth = int(idleConf.GetOption('main','FormatParagraph', - 'paragraph', type='int')) + """Formats paragraph to a max width specified in idleConf. + + If text is selected, format_paragraph_event will start breaking lines + at the max width, starting from the beginning selection. + + If no text is selected, format_paragraph_event uses the current + cursor location to determine the paragraph (lines of text surrounded + by blank lines) and formats it. + """ + maxformatwidth = idleConf.GetOption( + 'main', 'FormatParagraph', 'paragraph', type='int') text = self.editwin.text first, last = self.editwin.get_selection_indices() if first and last: data = text.get(first, last) - comment_header = '' + comment_header = get_comment_header(data) else: first, last, comment_header, data = \ find_paragraph(text, text.index("insert")) if comment_header: - # Reformat the comment lines - convert to text sans header. - lines = data.split("\n") - lines = map(lambda st, l=len(comment_header): st[l:], lines) - data = "\n".join(lines) - # Reformat to maxformatwidth chars or a 20 char width, whichever is greater. - format_width = max(maxformatwidth - len(comment_header), 20) - newdata = reformat_paragraph(data, format_width) - # re-split and re-insert the comment header. - newdata = newdata.split("\n") - # If the block ends in a \n, we dont want the comment - # prefix inserted after it. (Im not sure it makes sense to - # reformat a comment block that isnt made of complete - # lines, but whatever!) Can't think of a clean solution, - # so we hack away - block_suffix = "" - if not newdata[-1]: - block_suffix = "\n" - newdata = newdata[:-1] - builder = lambda item, prefix=comment_header: prefix+item - newdata = '\n'.join(map(builder, newdata)) + block_suffix + newdata = reformat_comment(data, maxformatwidth, comment_header) else: - # Just a normal text format newdata = reformat_paragraph(data, maxformatwidth) text.tag_remove("sel", "1.0", "end") + if newdata != data: text.mark_set("insert", first) text.undo_block_start() @@ -79,31 +70,44 @@ return "break" def find_paragraph(text, mark): + """Returns the start/stop indices enclosing the paragraph that mark is in. + + Also returns the comment format string, if any, and paragraph of text + between the start/stop indices. 
+ """ lineno, col = map(int, mark.split(".")) - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) + + # Look for start of next paragraph if the index passed in is a blank line while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line): lineno = lineno + 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) first_lineno = lineno comment_header = get_comment_header(line) comment_header_len = len(comment_header) + + # Once start line found, search for end of paragraph (a blank line) while get_comment_header(line)==comment_header and \ not is_all_white(line[comment_header_len:]): lineno = lineno + 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) last = "%d.0" % lineno - # Search back to beginning of paragraph + + # Search back to beginning of paragraph (first blank line before) lineno = first_lineno - 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) while lineno > 0 and \ get_comment_header(line)==comment_header and \ not is_all_white(line[comment_header_len:]): lineno = lineno - 1 - line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) + line = text.get("%d.0" % lineno, "%d.end" % lineno) first = "%d.0" % (lineno+1) + return first, last, comment_header, text.get(first, last) +# This should perhaps be replaced with textwrap.wrap def reformat_paragraph(data, limit): + """Return data reformatted to specified width (limit).""" lines = data.split("\n") i = 0 n = len(lines) @@ -126,7 +130,7 @@ if not word: continue # Can happen when line ends in whitespace if len((partial + word).expandtabs()) > limit and \ - partial != indent1: + partial != indent1: new.append(partial.rstrip()) partial = indent2 partial = partial + word + " " @@ -138,13 +142,50 @@ new.extend(lines[i:]) return "\n".join(new) +def reformat_comment(data, limit, comment_header): + """Return data reformatted to specified width with comment header.""" + + # Remove header from the comment lines + lc = len(comment_header) + data = "\n".join(line[lc:] for line in data.split("\n")) + # Reformat to maxformatwidth chars or a 20 char width, + # whichever is greater. + format_width = max(limit - len(comment_header), 20) + newdata = reformat_paragraph(data, format_width) + # re-split and re-insert the comment header. + newdata = newdata.split("\n") + # If the block ends in a \n, we dont want the comment prefix + # inserted after it. (Im not sure it makes sense to reformat a + # comment block that is not made of complete lines, but whatever!) + # Can't think of a clean solution, so we hack away + block_suffix = "" + if not newdata[-1]: + block_suffix = "\n" + newdata = newdata[:-1] + return '\n'.join(comment_header+line for line in newdata) + block_suffix + def is_all_white(line): + """Return True if line is empty or all whitespace.""" + return re.match(r"^\s*$", line) is not None def get_indent(line): - return re.match(r"^(\s*)", line).group() + """Return the initial space or tab indent of line.""" + return re.match(r"^([ \t]*)", line).group() def get_comment_header(line): - m = re.match(r"^(\s*#*)", line) + """Return string with leading whitespace and '#' from line or ''. + + A null return indicates that the line is not a comment line. A non- + null return, such as ' #', will be used to find the other lines of + a comment block with the same indent. 
+ """ + m = re.match(r"^([ \t]*#*)", line) if m is None: return "" return m.group(1) + +if __name__ == "__main__": + from test import support; support.use_resources = ['gui'] + import unittest + unittest.main('idlelib.idle_test.test_formatparagraph', + verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/GrepDialog.py b/lib-python/2.7/idlelib/GrepDialog.py --- a/lib-python/2.7/idlelib/GrepDialog.py +++ b/lib-python/2.7/idlelib/GrepDialog.py @@ -81,36 +81,24 @@ hits = 0 for fn in list: try: - f = open(fn) - except IOError, msg: + with open(fn) as f: + for lineno, line in enumerate(f, 1): + if line[-1:] == '\n': + line = line[:-1] + if prog.search(line): + sys.stdout.write("%s: %s: %s\n" % + (fn, lineno, line)) + hits += 1 + except IOError as msg: print msg - continue - lineno = 0 - while 1: - block = f.readlines(100000) - if not block: - break - for line in block: - lineno = lineno + 1 - if line[-1:] == '\n': - line = line[:-1] - if prog.search(line): - sys.stdout.write("%s: %s: %s\n" % (fn, lineno, line)) - hits = hits + 1 - if hits: - if hits == 1: - s = "" - else: - s = "s" - print "Found", hits, "hit%s." % s - print "(Hint: right-click to open locations.)" - else: - print "No hits." + print(("Hits found: %s\n" + "(Hint: right-click to open locations.)" + % hits) if hits else "No hits.") def findfiles(self, dir, base, rec): try: names = os.listdir(dir or os.curdir) - except os.error, msg: + except os.error as msg: print msg return [] list = [] @@ -131,3 +119,9 @@ if self.top: self.top.grab_release() self.top.withdraw() + +if __name__ == "__main__": + # A human test is a bit tricky since EditorWindow() imports this module. + # Hence Idle must be restarted after editing this file for a live test. + import unittest + unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -71,7 +71,7 @@ encoding = encoding.lower() -coding_re = re.compile("coding[:=]\s*([-\w_.]+)") +coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)') class EncodingMessage(SimpleDialog): "Inform user that an encoding declaration is needed." @@ -125,11 +125,12 @@ Raise LookupError if the encoding is declared but unknown. """ # Only consider the first two lines - str = str.split("\n")[:2] - str = "\n".join(str) - - match = coding_re.search(str) - if not match: + lst = str.split("\n", 2)[:2] + for line in lst: + match = coding_re.match(line) + if match is not None: + break + else: return None name = match.group(1) # Check whether the encoding is known @@ -248,10 +249,9 @@ try: # open the file in binary mode so that we can handle # end-of-line convention ourselves. 
- f = open(filename,'rb') - chars = f.read() - f.close() - except IOError, msg: + with open(filename, 'rb') as f: + chars = f.read() + except IOError as msg: tkMessageBox.showerror("I/O Error", str(msg), master=self.text) return False @@ -294,7 +294,7 @@ # Next look for coding specification try: enc = coding_spec(chars) - except LookupError, name: + except LookupError as name: tkMessageBox.showerror( title="Error loading the file", message="The encoding '%s' is not known to this Python "\ @@ -383,12 +383,10 @@ if self.eol_convention != "\n": chars = chars.replace("\n", self.eol_convention) try: - f = open(filename, "wb") - f.write(chars) - f.flush() - f.close() + with open(filename, "wb") as f: + f.write(chars) return True - except IOError, msg: + except IOError as msg: tkMessageBox.showerror("I/O Error", str(msg), master=self.text) return False @@ -408,7 +406,7 @@ try: enc = coding_spec(chars) failed = None - except LookupError, msg: + except LookupError as msg: failed = msg enc = None if enc: diff --git a/lib-python/2.7/idlelib/IdleHistory.py b/lib-python/2.7/idlelib/IdleHistory.py --- a/lib-python/2.7/idlelib/IdleHistory.py +++ b/lib-python/2.7/idlelib/IdleHistory.py @@ -1,81 +1,93 @@ +"Implement Idle Shell history mechanism with History class" + from idlelib.configHandler import idleConf class History: + ''' Implement Idle Shell history mechanism. - def __init__(self, text, output_sep = "\n"): + store - Store source statement (called from PyShell.resetoutput). + fetch - Fetch stored statement matching prefix already entered. + history_next - Bound to <> event (default Alt-N). + history_prev - Bound to <> event (default Alt-P). + ''' + def __init__(self, text): + '''Initialize data attributes and bind event methods. + + .text - Idle wrapper of tk Text widget, with .bell(). + .history - source statements, possibly with multiple lines. + .prefix - source already entered at prompt; filters history list. + .pointer - index into history. + .cyclic - wrap around history list (or not). + ''' self.text = text self.history = [] - self.history_prefix = None - self.history_pointer = None - self.output_sep = output_sep + self.prefix = None + self.pointer = None self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool") text.bind("<>", self.history_prev) text.bind("<>", self.history_next) def history_next(self, event): - self.history_do(0) + "Fetch later statement; start with ealiest if cyclic." + self.fetch(reverse=False) return "break" def history_prev(self, event): - self.history_do(1) + "Fetch earlier statement; start with most recent." + self.fetch(reverse=True) return "break" - def _get_source(self, start, end): - # Get source code from start index to end index. Lines in the - # text control may be separated by sys.ps2 . - lines = self.text.get(start, end).split(self.output_sep) - return "\n".join(lines) + def fetch(self, reverse): + '''Fetch statememt and replace current line in text widget. - def _put_source(self, where, source): - output = self.output_sep.join(source.split("\n")) - self.text.insert(where, output) - - def history_do(self, reverse): + Set prefix and pointer as needed for successive fetches. + Reset them to None, None when returning to the start line. + Sound bell when return to start line or cannot leave a line + because cyclic is False. 
+ ''' nhist = len(self.history) - pointer = self.history_pointer - prefix = self.history_prefix + pointer = self.pointer + prefix = self.prefix if pointer is not None and prefix is not None: if self.text.compare("insert", "!=", "end-1c") or \ - self._get_source("iomark", "end-1c") != self.history[pointer]: + self.text.get("iomark", "end-1c") != self.history[pointer]: pointer = prefix = None + self.text.mark_set("insert", "end-1c") # != after cursor move if pointer is None or prefix is None: - prefix = self._get_source("iomark", "end-1c") + prefix = self.text.get("iomark", "end-1c") if reverse: - pointer = nhist + pointer = nhist # will be decremented else: if self.cyclic: - pointer = -1 - else: + pointer = -1 # will be incremented + else: # abort history_next self.text.bell() return nprefix = len(prefix) while 1: - if reverse: - pointer = pointer - 1 - else: - pointer = pointer + 1 + pointer += -1 if reverse else 1 if pointer < 0 or pointer >= nhist: self.text.bell() - if not self.cyclic and pointer < 0: + if not self.cyclic and pointer < 0: # abort history_prev return else: - if self._get_source("iomark", "end-1c") != prefix: + if self.text.get("iomark", "end-1c") != prefix: self.text.delete("iomark", "end-1c") - self._put_source("iomark", prefix) + self.text.insert("iomark", prefix) pointer = prefix = None break item = self.history[pointer] if item[:nprefix] == prefix and len(item) > nprefix: self.text.delete("iomark", "end-1c") - self._put_source("iomark", item) + self.text.insert("iomark", item) break - self.text.mark_set("insert", "end-1c") self.text.see("insert") self.text.tag_remove("sel", "1.0", "end") - self.history_pointer = pointer - self.history_prefix = prefix + self.pointer = pointer + self.prefix = prefix - def history_store(self, source): + def store(self, source): + "Store Shell input statement into history list." source = source.strip() if len(source) > 2: # avoid duplicates @@ -84,5 +96,11 @@ except ValueError: pass self.history.append(source) - self.history_pointer = None - self.history_prefix = None + self.pointer = None + self.prefix = None + +if __name__ == "__main__": + from test import test_support as support + support.use_resources = ['gui'] + from unittest import main + main('idlelib.idle_test.test_idlehistory', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/PathBrowser.py b/lib-python/2.7/idlelib/PathBrowser.py --- a/lib-python/2.7/idlelib/PathBrowser.py +++ b/lib-python/2.7/idlelib/PathBrowser.py @@ -92,4 +92,5 @@ mainloop() if __name__ == "__main__": - main() + from unittest import main + main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/PyShell.py b/lib-python/2.7/idlelib/PyShell.py --- a/lib-python/2.7/idlelib/PyShell.py +++ b/lib-python/2.7/idlelib/PyShell.py @@ -50,35 +50,55 @@ # internal warnings to the console. ScriptBinding.check_syntax() will # temporarily redirect the stream to the shell window to display warnings when # checking user's code. -global warning_stream -warning_stream = sys.__stderr__ -try: - import warnings -except ImportError: - pass -else: - def idle_showwarning(message, category, filename, lineno, - file=None, line=None): - if file is None: - file = warning_stream - try: - file.write(warnings.formatwarning(message, category, filename, - lineno, line=line)) - except IOError: - pass ## file (probably __stderr__) is invalid, warning dropped. 
- warnings.showwarning = idle_showwarning - def idle_formatwarning(message, category, filename, lineno, line=None): - """Format warnings the IDLE way""" - s = "\nWarning (from warnings module):\n" - s += ' File \"%s\", line %s\n' % (filename, lineno) - if line is None: - line = linecache.getline(filename, lineno) - line = line.strip() - if line: - s += " %s\n" % line - s += "%s: %s\n>>> " % (category.__name__, message) - return s - warnings.formatwarning = idle_formatwarning +warning_stream = sys.__stderr__ # None, at least on Windows, if no console. +import warnings + +def idle_formatwarning(message, category, filename, lineno, line=None): + """Format warnings the IDLE way.""" + + s = "\nWarning (from warnings module):\n" + s += ' File \"%s\", line %s\n' % (filename, lineno) + if line is None: + line = linecache.getline(filename, lineno) + line = line.strip() + if line: + s += " %s\n" % line + s += "%s: %s\n" % (category.__name__, message) + return s + +def idle_showwarning( + message, category, filename, lineno, file=None, line=None): + """Show Idle-format warning (after replacing warnings.showwarning). + + The differences are the formatter called, the file=None replacement, + which can be None, the capture of the consequence AttributeError, + and the output of a hard-coded prompt. + """ + if file is None: + file = warning_stream + try: + file.write(idle_formatwarning( + message, category, filename, lineno, line=line)) + file.write(">>> ") + except (AttributeError, IOError): + pass # if file (probably __stderr__) is invalid, skip warning. + +_warnings_showwarning = None + +def capture_warnings(capture): + "Replace warning.showwarning with idle_showwarning, or reverse." + + global _warnings_showwarning + if capture: + if _warnings_showwarning is None: + _warnings_showwarning = warnings.showwarning + warnings.showwarning = idle_showwarning + else: + if _warnings_showwarning is not None: + warnings.showwarning = _warnings_showwarning + _warnings_showwarning = None + +capture_warnings(True) def extended_linecache_checkcache(filename=None, orig_checkcache=linecache.checkcache): @@ -370,6 +390,7 @@ self.port = PORT self.original_compiler_flags = self.compile.compiler.flags + _afterid = None rpcclt = None rpcpid = None @@ -409,7 +430,7 @@ try: self.rpcclt = MyRPCClient(addr) break - except socket.error, err: + except socket.error as err: pass else: self.display_port_binding_error() @@ -430,7 +451,7 @@ self.rpcclt.listening_sock.settimeout(10) try: self.rpcclt.accept() - except socket.timeout, err: + except socket.timeout as err: self.display_no_subprocess_error() return None self.rpcclt.register("console", self.tkconsole) @@ -465,7 +486,7 @@ self.spawn_subprocess() try: self.rpcclt.accept() - except socket.timeout, err: + except socket.timeout as err: self.display_no_subprocess_error() return None self.transfer_path(with_cwd=with_cwd) @@ -497,6 +518,8 @@ threading.Thread(target=self.__request_interrupt).start() def kill_subprocess(self): + if self._afterid is not None: + self.tkconsole.text.after_cancel(self._afterid) try: self.rpcclt.close() except AttributeError: # no socket @@ -569,8 +592,8 @@ pass # Reschedule myself if not self.tkconsole.closing: - self.tkconsole.text.after(self.tkconsole.pollinterval, - self.poll_subprocess) + self._afterid = self.tkconsole.text.after( + self.tkconsole.pollinterval, self.poll_subprocess) debugger = None @@ -844,7 +867,6 @@ ] if macosxSupport.runningAsOSXApp(): - del menu_specs[-3] menu_specs[-2] = ("windows", "_Window") @@ -988,10 +1010,6 @@ 
self.stop_readline() self.canceled = True self.closing = True - # Wait for poll_subprocess() rescheduling to stop - self.text.after(2 * self.pollinterval, self.close2) - - def close2(self): return EditorWindow.close(self) def _close(self): @@ -1260,7 +1278,7 @@ def resetoutput(self): source = self.text.get("iomark", "end-1c") if self.history: - self.history.history_store(source) + self.history.store(source) if self.text.get("end-2c") != "\n": self.text.insert("end-1c", "\n") self.text.mark_set("iomark", "end-1c") @@ -1430,6 +1448,7 @@ def main(): global flist, root, use_subprocess + capture_warnings(True) use_subprocess = True enable_shell = False enable_edit = False @@ -1439,7 +1458,7 @@ startup = False try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") - except getopt.error, msg: + except getopt.error as msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) @@ -1562,7 +1581,10 @@ while flist.inversedict: # keep IDLE running while files are open. root.mainloop() root.destroy() + capture_warnings(False) if __name__ == "__main__": sys.modules['PyShell'] = sys.modules['__main__'] main() + +capture_warnings(False) # Make sure turned off; see issue 18081 diff --git a/lib-python/2.7/idlelib/RstripExtension.py b/lib-python/2.7/idlelib/RstripExtension.py --- a/lib-python/2.7/idlelib/RstripExtension.py +++ b/lib-python/2.7/idlelib/RstripExtension.py @@ -1,13 +1,9 @@ 'Provides "Strip trailing whitespace" under the "Format" menu.' -__author__ = "Roger D. Serwy " - class RstripExtension: menudefs = [ - ('format', [None, - ('Strip trailing whitespace', '<>'), - ]),] + ('format', [None, ('Strip trailing whitespace', '<>'), ] ), ] def __init__(self, editwin): self.editwin = editwin @@ -20,10 +16,18 @@ undo.undo_block_start() - end_line = int(float(text.index('end'))) + 1 + end_line = int(float(text.index('end'))) for cur in range(1, end_line): - txt = text.get('%i.0' % cur, '%i.0 lineend' % cur) + txt = text.get('%i.0' % cur, '%i.end' % cur) + raw = len(txt) cut = len(txt.rstrip()) - text.delete('%i.%i' % (cur, cut), '%i.0 lineend' % cur) + # Since text.delete() marks file as changed, even if not, + # only call it when needed to actually delete something. + if cut < raw: + text.delete('%i.%i' % (cur, cut), '%i.end' % cur) undo.undo_block_stop() + +if __name__ == "__main__": + import unittest + unittest.main('idlelib.idle_test.test_rstrip', verbosity=2, exit=False) diff --git a/lib-python/2.7/idlelib/ScriptBinding.py b/lib-python/2.7/idlelib/ScriptBinding.py --- a/lib-python/2.7/idlelib/ScriptBinding.py +++ b/lib-python/2.7/idlelib/ScriptBinding.py @@ -70,13 +70,13 @@ f = open(filename, 'r') try: tabnanny.process_tokens(tokenize.generate_tokens(f.readline)) - except tokenize.TokenError, msg: + except tokenize.TokenError as msg: msgtxt, (lineno, start) = msg self.editwin.gotoline(lineno) self.errorbox("Tabnanny Tokenizing Error", "Token Error: %s" % msgtxt) return False - except tabnanny.NannyNag, nag: + except tabnanny.NannyNag as nag: # The error messages from tabnanny are too confusing... 
self.editwin.gotoline(nag.get_lineno()) self.errorbox("Tab/space error", indent_message) @@ -87,9 +87,8 @@ self.shell = shell = self.flist.open_shell() saved_stream = shell.get_warning_stream() shell.set_warning_stream(shell.stderr) - f = open(filename, 'r') - source = f.read() - f.close() + with open(filename, 'r') as f: + source = f.read() if '\r' in source: source = re.sub(r"\r\n", "\n", source) source = re.sub(r"\r", "\n", source) @@ -101,7 +100,7 @@ try: # If successful, return the compiled code return compile(source, filename, "exec") - except (SyntaxError, OverflowError, ValueError), err: From noreply at buildbot.pypy.org Sat Mar 1 22:31:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 22:31:33 +0100 (CET) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20140301213133.58DC81C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r915:ad1c9d106b1c Date: 2014-03-01 22:31 +0100 http://bitbucket.org/pypy/stmgc/changeset/ad1c9d106b1c/ Log: in-progress diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -187,122 +187,84 @@ } } -static void mark_record_modified_objects(void) +static inline void mark_record_trace(object_t **pobj) +{ + /* takes a normal pointer to a thread-local pointer to an object */ + object_t *obj = *pobj; + + if (obj == NULL || mark_visited_test_and_set(obj)) + return; /* already visited this object */ + + LIST_APPEND(mark_objects_to_trace, obj); +} + +static void mark_from_object(object_t *obj, char *segment_base) +{ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; + + assert(list_is_empty(mark_objects_to_trace)); + + while (1) { + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(segment_base, obj); + stmcb_trace(realobj, &mark_record_trace); + + if (list_is_empty(mark_objects_to_trace)) + break; + + obj = (object_t *)list_pop_item(mark_objects_to_trace); + } +} + +static void mark_visit_from_roots(void) +{ + stm_thread_local_t *tl = stm_all_thread_locals; + do { + /* If 'tl' is currently running, its 'associated_segment_num' + field is the segment number that contains the correct + version of its overflowed objects. If not, then the + field is still some correct segment number, and it doesn't + matter which one we pick. */ + char *segment_base = get_segment_base(tl->associated_segment_num); + + object_t **current = tl->shadowstack; + object_t **base = tl->shadowstack_base; + while (current-- != base) { + assert(*current != (object_t *)-1); + mark_from_object(*current, segment_base); + } + mark_from_object(tl->thread_local_obj, segment_base); + + tl = tl->next; + } while (tl != stm_all_thread_locals); + + if (testing_prebuilt_objs != NULL) { + LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, + mark_from_object(item, get_segment_base(0))); + } +} + +static void mark_visit_from_modified_objects(void) { /* The modified objects are the ones that may exist in two different versions: one in the segment that modified it, and another in all other segments. 
*/ long i; for (i = 0; i < NB_SEGMENTS; i++) { - struct stm_priv_segment_info_s *pseg = get_priv_segment(i); char *base1 = get_segment_base(i); /* two different segments */ char *base2 = get_segment_base(!i); LIST_FOREACH_R( - pseg->modified_old_objects, + get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ - assert(item != NULL); - - uintptr_t lock_idx = mark_loc(item); - assert(write_locks[lock_idx] == pseg->write_lock_num); - - write_locks[lock_idx] = WL_VISITED; - LIST_APPEND(mark_objects_to_trace, REAL_ADDRESS(base1, item)); - LIST_APPEND(mark_objects_to_trace, REAL_ADDRESS(base2, item)); + mark_from_object(item, base1); + mark_from_object(item, base2); })); } } -static void reset_write_locks(void) -{ - /* the write_locks array, containing the visit marker during - major collection, is cleared in sweep_large_objects() for - large objects, but is not cleared for small objects. - Clear it now. */ - object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); - uintptr_t lock2_idx = mark_loc(loc2 - 1) + 1; - -#ifdef STM_TESTS - long _i; - for (_i=0; _imodified_old_objects, - object_t * /*item*/, - ({ - uintptr_t lock_idx = mark_loc(item); - assert(write_locks[lock_idx] == 0); - write_locks[lock_idx] = pseg->write_lock_num; - })); - } -} - -static inline void mark_record_trace(object_t **pobj) -{ - /* takes a normal pointer to a thread-local pointer to an object */ - object_t *obj = *pobj; - - if (obj == NULL) - return; - - if (mark_visited_test_and_set(obj)) - return; /* already visited this object */ - - LIST_APPEND(mark_objects_to_trace, REAL_ADDRESS(stm_object_pages, obj)); -} - -static void mark_collect_roots(void) -{ - stm_thread_local_t *tl = stm_all_thread_locals; - do { - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; - while (current-- != base) { - assert(*current != (object_t *)-1); - mark_record_trace(current); - } - mark_record_trace(&tl->thread_local_obj); - - tl = tl->next; - } while (tl != stm_all_thread_locals); - - if (testing_prebuilt_objs != NULL) { - LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, - mark_record_trace(&item)); - } -} - -static void mark_visit_all_objects(void) -{ - while (!list_is_empty(mark_objects_to_trace)) { - struct object_s *obj = - (struct object_s *)list_pop_item(mark_objects_to_trace); - stmcb_trace(obj, &mark_record_trace); - } -} - -static inline bool largemalloc_keep_object_at(char *data) -{ - /* this is called by _stm_largemalloc_sweep() */ - return mark_visited_test_and_clear((object_t *)(data - stm_object_pages)); -} - -static void sweep_large_objects(void) -{ - _stm_largemalloc_sweep(); -} - static void clean_up_segment_lists(void) { long i; @@ -340,6 +302,73 @@ } } +static inline bool largemalloc_keep_object_at(char *data) +{ + /* this is called by _stm_largemalloc_sweep() */ + return mark_visited_test_and_clear((object_t *)(data - stm_object_pages)); +} + +static void sweep_large_objects(void) +{ + _stm_largemalloc_sweep(); +} + +static void clean_write_locks(void) +{ + /* the write_locks array, containing the visit marker during + major collection, is cleared in sweep_large_objects() for + large objects, but is not cleared for small objects. + Clear it now. 
*/ + object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); + uintptr_t lock2_idx = mark_loc(loc2 - 1) + 1; + +#ifdef STM_TESTS + long _i; + for (_i=0; _imodified_old_objects, + object_t * /*item*/, + ({ + assert(item != NULL); + + uintptr_t lock_idx = mark_loc(item); + assert(write_locks[lock_idx] == pseg->write_lock_num); + write_locks[lock_idx] = 0; + })); + } +} + +static void major_set_write_locks(void) +{ + /* restore the write locks on the modified objects */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + + LIST_FOREACH_R( + pseg->modified_old_objects, + object_t * /*item*/, + ({ + uintptr_t lock_idx = mark_loc(item); + assert(write_locks[lock_idx] == 0); + write_locks[lock_idx] = pseg->write_lock_num; + })); + } +} + static void major_collection_now_at_safe_point(void) { dprintf(("\n")); @@ -352,11 +381,12 @@ dprintf((" | used before collection: %ld\n", (long)pages_ctl.total_allocated)); + major_clear_write_locks(); + /* marking */ mark_objects_to_trace = list_create(); - mark_record_modified_objects(); - mark_collect_roots(); - mark_visit_all_objects(); + mark_visit_from_modified_objects(); + mark_visit_from_roots(); list_free(mark_objects_to_trace); mark_objects_to_trace = NULL; @@ -369,7 +399,8 @@ //sweep_uniform_pages(); mutex_pages_unlock(); - reset_write_locks(); + clean_write_locks(); + major_set_write_locks(); dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -177,6 +177,7 @@ stm_major_collect() # self.switch(1) + x = self.pop_root() assert stm_get_char(x, size - 1) == 'E' def test_trace_correct_version_of_overflow_objects_2(self): From noreply at buildbot.pypy.org Sat Mar 1 22:46:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 22:46:03 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: eliminate small noop diffs with vendor/stdlib Message-ID: <20140301214603.289DE1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69580:2f4bbe7caa22 Date: 2014-03-01 16:45 -0500 http://bitbucket.org/pypy/pypy/changeset/2f4bbe7caa22/ Log: eliminate small noop diffs with vendor/stdlib diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -289,7 +289,7 @@ finally: os.chdir(old_wd) self.assertTrue(os.path.exists(so_file)) - self.assertEqual(so_file[so_file.index(os.path.extsep):], + self.assertEqual(os.path.splitext(so_file)[-1], sysconfig.get_config_var('SO')) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, other_tmp_dir) @@ -298,7 +298,7 @@ cmd.run() so_file = cmd.get_outputs()[0] self.assertTrue(os.path.exists(so_file)) - self.assertEqual(so_file[so_file.index(os.path.extsep):], + self.assertEqual(os.path.splitext(so_file)[-1], sysconfig.get_config_var('SO')) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, cmd.build_lib) diff --git a/lib-python/2.7/sqlite3/test/dbapi.py b/lib-python/2.7/sqlite3/test/dbapi.py --- a/lib-python/2.7/sqlite3/test/dbapi.py +++ b/lib-python/2.7/sqlite3/test/dbapi.py @@ -1,4 +1,4 @@ -#-*- coding: iso-8859-1 -*- +#-*- coding: ISO-8859-1 -*- # pysqlite2/test/dbapi.py: tests for DB-API compliance # # Copyright (C) 2004-2010 Gerhard H�ring From noreply at buildbot.pypy.org Sat Mar 
1 22:58:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Mar 2014 22:58:19 +0100 (CET) Subject: [pypy-commit] stmgc default: Fixes until all tests seem to pass. Message-ID: <20140301215819.9C0D31C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r916:dac6b73eede4 Date: 2014-03-01 22:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/dac6b73eede4/ Log: Fixes until all tests seem to pass. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -3,6 +3,9 @@ #endif +static struct list_s *testing_prebuilt_objs = NULL; + + static void setup_gcpage(void) { char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; @@ -17,6 +20,7 @@ { memset(small_alloc, 0, sizeof(small_alloc)); free_uniform_pages = NULL; + LIST_FREE(testing_prebuilt_objs); } @@ -95,8 +99,6 @@ return addr; } -static struct list_s *testing_prebuilt_objs = NULL; - object_t *_stm_allocate_old(ssize_t size_rounded_up) { /* only for tests */ @@ -198,11 +200,8 @@ LIST_APPEND(mark_objects_to_trace, obj); } -static void mark_from_object(object_t *obj, char *segment_base) +static void mark_trace(object_t *obj, char *segment_base) { - if (obj == NULL || mark_visited_test_and_set(obj)) - return; - assert(list_is_empty(mark_objects_to_trace)); while (1) { @@ -217,6 +216,13 @@ } } +static inline void mark_visit_object(object_t *obj, char *segment_base) +{ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; + mark_trace(obj, segment_base); +} + static void mark_visit_from_roots(void) { stm_thread_local_t *tl = stm_all_thread_locals; @@ -232,16 +238,16 @@ object_t **base = tl->shadowstack_base; while (current-- != base) { assert(*current != (object_t *)-1); - mark_from_object(*current, segment_base); + mark_visit_object(*current, segment_base); } - mark_from_object(tl->thread_local_obj, segment_base); + mark_visit_object(tl->thread_local_obj, segment_base); tl = tl->next; } while (tl != stm_all_thread_locals); if (testing_prebuilt_objs != NULL) { LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, - mark_from_object(item, get_segment_base(0))); + mark_visit_object(item, get_segment_base(0))); } } @@ -259,8 +265,9 @@ get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ - mark_from_object(item, base1); - mark_from_object(item, base2); + mark_visited_test_and_set(item); + mark_trace(item, base1); + mark_trace(item, base2); })); } } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -324,13 +324,6 @@ def stm_was_written(o): return lib._stm_was_written(o) -def stm_creation_marker(o): - return lib._stm_creation_marker(o) - -def stm_stop_transaction(): - if lib._stm_stop_transaction(): - raise Conflict() - def stm_start_safe_point(): lib._stm_start_safe_point() diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -164,7 +164,9 @@ def pop_roots(self, ex): for r in reversed(self.saved_roots[self.roots_on_transaction_start:]): ex.do('%s = self.pop_root()' % r) - ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])),)) + ex.do('# 0x%x, size %d' % ( + int(ffi.cast("uintptr_t", ex.content[r])), + stm_get_obj_size(ex.content[r]))) self.roots_on_stack -= 1 assert self.roots_on_stack == self.roots_on_transaction_start @@ -176,8 +178,9 @@ for r in reversed(to_reload): ex.do('%s = self.pop_root()' % r) for r in to_reload: - ex.do('self.push_root(%s) # 0x%x' % ( - r, int(ffi.cast("uintptr_t", ex.content[r])))) + 
ex.do('self.push_root(%s) # 0x%x, size %d' % ( + r, int(ffi.cast("uintptr_t", ex.content[r])), + stm_get_obj_size(ex.content[r]))) def start_transaction(self): assert self.transaction_state is None From noreply at buildbot.pypy.org Sat Mar 1 23:00:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 23:00:38 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: add new tests to conftest.py Message-ID: <20140301220038.9B8EC1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69581:319f9797f093 Date: 2014-03-01 16:59 -0500 http://bitbucket.org/pypy/pypy/changeset/319f9797f093/ Log: add new tests to conftest.py diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -261,6 +261,7 @@ RegrTest('test_htmlparser.py'), RegrTest('test_httplib.py'), RegrTest('test_httpservers.py'), + RegrTest('test_idle.py'), RegrTest('test_imageop.py'), RegrTest('test_imaplib.py'), RegrTest('test_imgfile.py'), @@ -293,6 +294,7 @@ RegrTest('test_macos.py'), RegrTest('test_macostools.py'), RegrTest('test_macpath.py'), + RegrTest('test_macurl2path.py'), RegrTest('test_mailbox.py'), RegrTest('test_marshal.py', core=True), RegrTest('test_math.py', core=True, usemodules='math'), @@ -317,6 +319,7 @@ RegrTest('test_new.py', core=True), RegrTest('test_nis.py'), RegrTest('test_normalization.py'), + RegrTest('test_nntplib.py'), RegrTest('test_ntpath.py'), RegrTest('test_old_mailbox.py'), RegrTest('test_opcodes.py', core=True), @@ -397,6 +400,7 @@ RegrTest('test_sqlite.py', usemodules="thread _rawffi zlib"), RegrTest('test_ssl.py', usemodules='_ssl _socket select'), RegrTest('test_startfile.py'), + RegrTest('test_stat.py'), RegrTest('test_str.py', core=True), RegrTest('test_strftime.py'), RegrTest('test_string.py', core=True), @@ -409,6 +413,7 @@ RegrTest('test_structmembers.py', skip="CPython specific"), RegrTest('test_structseq.py'), RegrTest('test_subprocess.py', usemodules='signal'), + RegrTest('test_sunau.py'), RegrTest('test_sunaudiodev.py'), RegrTest('test_sundry.py'), RegrTest('test_symtable.py', skip="implementation detail"), From noreply at buildbot.pypy.org Sat Mar 1 23:40:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 1 Mar 2014 23:40:19 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fixes for audioop Message-ID: <20140301224019.C09CA1C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69582:781b1f702577 Date: 2014-03-01 17:30 -0500 http://bitbucket.org/pypy/pypy/changeset/781b1f702577/ Log: fixes for audioop diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -1,3 +1,4 @@ +from __future__ import division import __builtin__ as builtins import math import struct @@ -24,7 +25,7 @@ def _sample_count(cp, size): - return len(cp) / size + return len(cp) // size def _get_samples(cp, size, signed=True): @@ -101,7 +102,7 @@ def getsample(cp, size, i): _check_params(len(cp), size) - if not (0 <= i < len(cp) / size): + if not (0 <= i < len(cp) // size): raise error("Index out of range") return _get_sample(cp, size, i) @@ -118,7 +119,7 @@ def minmax(cp, size): _check_params(len(cp), size) - max_sample, min_sample = 0, 0 + min_sample, max_sample = 0x7fffffff, -0x80000000 for sample in _get_samples(cp, size): max_sample = builtins.max(sample, max_sample) min_sample = builtins.min(sample, min_sample) @@ -131,7 +132,7 @@ sample_count = _sample_count(cp, size) if sample_count == 0: return 0 
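For context: the audioop.py hunks around this point add "from __future__ import division" and switch the sample/byte arithmetic from "/" to "//". A minimal sketch with made-up values (not part of the patch) of why the explicit floor division matters once true division is in effect:

    from __future__ import division

    frame_size = 2
    data_len = 7                       # illustrative byte count only
    print(data_len / frame_size)       # 3.5 -- true division now returns a float
    print(data_len // frame_size)      # 3   -- floor division keeps an integer sample count

The minmax() change in the same commit (starting from 0x7fffffff / -0x80000000 instead of 0 / 0) similarly makes all-negative or all-positive input report its real extremes.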
- return sum(_get_samples(cp, size)) / sample_count + return sum(_get_samples(cp, size)) // sample_count def rms(cp, size): @@ -142,7 +143,7 @@ return 0 sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) - return int(math.sqrt(sum_squares / sample_count)) + return int(math.sqrt(sum_squares // sample_count)) def _sum2(cp1, cp2, length): @@ -241,6 +242,8 @@ def avgpp(cp, size): _check_params(len(cp), size) sample_count = _sample_count(cp, size) + if sample_count <= 2: + return 0 prevextremevalid = False prevextreme = None @@ -271,12 +274,14 @@ if nextreme == 0: return 0 - return avg / nextreme + return avg // nextreme def maxpp(cp, size): _check_params(len(cp), size) sample_count = _sample_count(cp, size) + if sample_count <= 1: + return 0 prevextremevalid = False prevextreme = None @@ -309,13 +314,13 @@ def cross(cp, size): _check_params(len(cp), size) - crossings = 0 - last_sample = 0 + crossings = -1 + last_sample = 17 for sample in _get_samples(cp, size): - if sample <= 0 < last_sample or sample >= 0 > last_sample: + sample = sample < 0 + if sample != last_sample: crossings += 1 last_sample = sample - return crossings @@ -338,7 +343,7 @@ sample_count = _sample_count(cp, size) - result = create_string_buffer(len(cp) / 2) + result = create_string_buffer(len(cp) // 2) for i in range(0, sample_count, 2): l_sample = getsample(cp, size, i) @@ -347,7 +352,7 @@ sample = (l_sample * fac1) + (r_sample * fac2) sample = clip(sample) - _put_sample(result, size, i / 2, sample) + _put_sample(result, size, i // 2, sample) return result.raw @@ -423,19 +428,20 @@ if size == size2: return cp - new_len = (len(cp) / size) * size2 - + new_len = (len(cp) // size) * size2 result = create_string_buffer(new_len) for i in range(_sample_count(cp, size)): sample = _get_sample(cp, size, i) - if size < size2: - sample = sample << (4 * size2 / size) - elif size > size2: - sample = sample >> (4 * size / size2) - + if size == 1: + sample <<= 24 + elif size == 2: + sample <<= 16 + if size2 == 1: + sample >>= 24 + elif size2 == 2: + sample >>= 16 sample = _overflow(sample, size2) - _put_sample(result, size2, i, sample) return result.raw @@ -447,9 +453,9 @@ raise error("# of channels should be >= 1") bytes_per_frame = size * nchannels - frame_count = len(cp) / bytes_per_frame + frame_count = len(cp) // bytes_per_frame - if bytes_per_frame / nchannels != size: + if bytes_per_frame // nchannels != size: raise OverflowError("width * nchannels too big for a C int") if weightA < 1 or weightB < 0: @@ -462,8 +468,8 @@ raise error("sampling rate not > 0") d = gcd(inrate, outrate) - inrate /= d - outrate /= d + inrate //= d + outrate //= d prev_i = [0] * nchannels cur_i = [0] * nchannels @@ -479,7 +485,7 @@ prev_i, cur_i = zip(*samps) prev_i, cur_i = list(prev_i), list(cur_i) - q = frame_count / inrate + q = frame_count // inrate ceiling = (q + 1) * outrate nbytes = ceiling * bytes_per_frame @@ -505,7 +511,7 @@ cur_i[chan] = ( (weightA * cur_i[chan] + weightB * prev_i[chan]) - / (weightA + weightB) + // (weightA + weightB) ) frame_count -= 1 @@ -515,7 +521,7 @@ for chan in range(nchannels): cur_o = ( (prev_i[chan] * d + cur_i[chan] * (outrate - d)) - / outrate + // outrate ) _put_sample(result, size, out_i, _overflow(cur_o, size)) out_i += 1 From noreply at buildbot.pypy.org Sun Mar 2 00:38:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 00:38:47 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140301233847.3D4511C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns 
Branch: Changeset: r69583:76a313cc0346 Date: 2014-03-01 15:29 -0500 http://bitbucket.org/pypy/pypy/changeset/76a313cc0346/ Log: cleanup diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -2,7 +2,6 @@ class AppTestStringObjectWithDict: - def test_format_item(self): d = {'i': 23} assert 'a23b' == 'a%(i)sb' % d @@ -36,18 +35,20 @@ assert '' % d == '' n = 5 raises(TypeError, "'' % n") + class MyMapping(object): def __getitem__(self, key): py.test.fail('should not be here') assert '' % MyMapping() == '' + class MyMapping2(object): def __getitem__(self, key): return key assert '%(key)s'%MyMapping2() == 'key' assert u'%(key)s'%MyMapping2() == u'key' + class AppTestStringObject: - def test_format_item(self): n = 23 assert 'a23b' == 'a%sb' % n @@ -130,7 +131,7 @@ def test_format_long(self): l = 4800000000L assert '%d' % l == '4800000000' - # + class SubLong(long): pass sl = SubLong(l) @@ -147,7 +148,6 @@ assert '<(1, 2)-(3, 4)>' == '<%s-%s>' % (t, (3,4)) def test_format_dict(self): - # I'll just note that the first of these two completely # contradicts what CPython's documentation says: @@ -182,7 +182,7 @@ raises(TypeError, '%c'.__mod__, ("bla",)) raises(TypeError, '%c'.__mod__, ("",)) raises(TypeError, '%c'.__mod__, (['c'],)) - + def test_broken_unicode(self): raises(UnicodeDecodeError, 'Názov: %s'.__mod__, u'Jerry') @@ -192,16 +192,10 @@ self.x = x def __int__(self): return self.x - # x = MyInt(65) assert '%c' % x == 'A' -class Foo(object): - def __cmp__(self, other): - return MyInt(0) - - class AppTestWidthPrec: def test_width(self): a = 'a' @@ -242,7 +236,6 @@ assert "%-05g" % ttf =="2.25 " assert "%05s" % ttf == " 2.25" - def test_star_width(self): f = 5 assert "%*s" %( f, 'abc') == ' abc' @@ -281,6 +274,7 @@ assert "%F" % (nan,) == 'NAN' assert "%G" % (nan,) == 'NAN' + class AppTestUnicodeObject: def test_unicode_convert(self): u = u"x" From noreply at buildbot.pypy.org Sun Mar 2 00:38:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 00:38:48 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: merge default Message-ID: <20140301233848.721A71C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69584:80b44c6a8346 Date: 2014-03-01 17:43 -0500 http://bitbucket.org/pypy/pypy/changeset/80b44c6a8346/ Log: merge default diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -2,7 +2,6 @@ class AppTestStringObjectWithDict: - def test_format_item(self): d = {'i': 23} assert 'a23b' == 'a%(i)sb' % d @@ -36,18 +35,20 @@ assert '' % d == '' n = 5 raises(TypeError, "'' % n") + class MyMapping(object): def __getitem__(self, key): py.test.fail('should not be here') assert '' % MyMapping() == '' + class MyMapping2(object): def __getitem__(self, key): return key assert '%(key)s'%MyMapping2() == 'key' assert u'%(key)s'%MyMapping2() == u'key' + class AppTestStringObject: - def test_format_item(self): n = 23 assert 'a23b' == 'a%sb' % n @@ -130,7 +131,7 @@ def test_format_long(self): l = 4800000000L assert '%d' % l == '4800000000' - # + class SubLong(long): pass sl = SubLong(l) @@ -147,7 +148,6 @@ assert '<(1, 2)-(3, 4)>' == '<%s-%s>' % (t, (3,4)) def test_format_dict(self): - # I'll just note that the first of these two completely # contradicts what 
CPython's documentation says: @@ -182,7 +182,7 @@ raises(TypeError, '%c'.__mod__, ("bla",)) raises(TypeError, '%c'.__mod__, ("",)) raises(TypeError, '%c'.__mod__, (['c'],)) - + def test_broken_unicode(self): raises(UnicodeDecodeError, 'Názov: %s'.__mod__, u'Jerry') @@ -192,7 +192,6 @@ self.x = x def __int__(self): return self.x - # x = MyInt(65) assert '%c' % x == 'A' @@ -206,11 +205,6 @@ assert "%x" % IntFails() == '0' -class Foo(object): - def __cmp__(self, other): - return MyInt(0) - - class AppTestWidthPrec: def test_width(self): a = 'a' @@ -251,7 +245,6 @@ assert "%-05g" % ttf =="2.25 " assert "%05s" % ttf == " 2.25" - def test_star_width(self): f = 5 assert "%*s" %( f, 'abc') == ' abc' @@ -290,6 +283,7 @@ assert "%F" % (nan,) == 'NAN' assert "%G" % (nan,) == 'NAN' + class AppTestUnicodeObject: def test_unicode_convert(self): u = u"x" From noreply at buildbot.pypy.org Sun Mar 2 00:38:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 00:38:49 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix newformat overflow check Message-ID: <20140301233849.A0A971C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69585:e95890c046a1 Date: 2014-03-01 18:04 -0500 http://bitbucket.org/pypy/pypy/changeset/e95890c046a1/ Log: fix newformat overflow check diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -1,12 +1,12 @@ """The unicode/str format() method""" +import sys import string -from pypy.interpreter.error import OperationError -from rpython.rlib import rstring, runicode, rlocale, rarithmetic, rfloat, jit +from pypy.interpreter.error import OperationError, oefmt +from rpython.rlib import rstring, runicode, rlocale, rfloat, jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rfloat import copysign, formatd -from rpython.tool import sourcetools @specialize.argtype(1) @@ -19,14 +19,12 @@ result = 0 i = start while i < end: - c = ord(s[i]) - if ord("0") <= c <= ord("9"): - try: - result = rarithmetic.ovfcheck(result * 10) - except OverflowError: - msg = "too many decimal digits in format string" - raise OperationError(space.w_ValueError, space.wrap(msg)) - result += c - ord("0") + digit = ord(s[i]) - ord('0') + if 0 <= digit <= 9: + if result > (sys.maxsize - digit) / 10: + raise oefmt(space.w_ValueError, + "too many decimal digits in format string") + result = result * 10 + digit else: break i += 1 @@ -384,8 +382,8 @@ class NumberSpec(object): pass + class BaseFormatter(object): - def format_int_or_long(self, w_num, kind): raise NotImplementedError diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -169,9 +169,23 @@ raises(ValueError, '{0!r'.format, 5) raises(ValueError, '{0!rs}'.format, 5) + def test_format_huge_precision(self): + import sys + format_string = self.s(".{}f").format(sys.maxsize + 1) + raises(ValueError, "format(2.34, format_string)") + + def test_format_huge_width(self): + import sys + format_string = self.s("{}f").format(sys.maxsize + 1) + raises(ValueError, "format(2.34, format_string)") + + def test_format_huge_item_number(self): + import sys + format_string = self.s("{{{}:.6f}}").format(sys.maxsize + 1) + raises(ValueError, "format(2.34, format_string)") + class AppTestUnicodeFormat(BaseStringFormatTests): - def setup_class(cls): cls.w_s = 
cls.space.w_unicode @@ -190,9 +204,7 @@ raises(KeyError, self.s("{\u1000}").format) - class AppTestStringFormat(BaseStringFormatTests): - def setup_class(cls): cls.w_s = cls.space.w_str @@ -209,7 +221,6 @@ class AppTestBoolFormat: - def test_str_format(self): assert format(False) == "False" assert format(True) == "True" @@ -224,9 +235,7 @@ assert "{:g}".format(True) == "1" - class BaseIntegralFormattingTest: - def test_simple(self): assert format(self.i(2)) == "2" assert isinstance(format(self.i(2), u""), unicode) @@ -302,13 +311,11 @@ class AppTestIntFormatting(BaseIntegralFormattingTest): - def setup_class(cls): cls.w_i = cls.space.w_int class AppTestLongFormatting(BaseIntegralFormattingTest): - def setup_class(cls): cls.w_i = cls.space.w_long From noreply at buildbot.pypy.org Sun Mar 2 00:38:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 00:38:50 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix old format overflow check Message-ID: <20140301233850.C61A61C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69586:de582fef597e Date: 2014-03-01 18:37 -0500 http://bitbucket.org/pypy/pypy/changeset/de582fef597e/ Log: fix old format overflow check diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -1,7 +1,8 @@ """ String formatting routines. """ -from pypy.interpreter.error import OperationError +import sys +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit from rpython.rlib.rarithmetic import ovfcheck from rpython.rlib.rfloat import formatd, DTSF_ALT, isnan, isinf @@ -217,7 +218,7 @@ self.peel_flags() - self.width = self.peel_num() + self.width = self.peel_num('width', sys.maxsize) if self.width < 0: # this can happen: '%*s' % (-5, "hi") self.f_ljust = True @@ -225,7 +226,7 @@ if self.peekchr() == '.': self.forward() - self.prec = self.peel_num() + self.prec = self.peel_num('prec', sys.maxint) if self.prec < 0: self.prec = 0 # this can happen: '%.*f' % (-5, 3) else: @@ -263,7 +264,7 @@ # Same as getmappingkey @jit.unroll_safe - def peel_num(self): + def peel_num(self, name, maxval): space = self.space c = self.peekchr() if c == '*': @@ -272,14 +273,12 @@ return space.int_w(maybe_int(space, w_value)) result = 0 while True: - n = ord(c) - ord('0') - if not (0 <= n < 10): + digit = ord(c) - ord('0') + if not (0 <= digit <= 9): break - try: - result = ovfcheck(ovfcheck(result * 10) + n) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("precision too large")) + if result > (maxval - digit) / 10: + raise oefmt(space.w_ValueError, "%s too big", name) + result = result * 10 + digit self.forward() c = self.peekchr() return result diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -204,6 +204,17 @@ assert "%x" % IntFails() == '0' + def test_formatting_huge_precision(self): + import sys + format_string = "%.{}f".format(sys.maxint + 1) + exc = raises(ValueError, "format_string % 2.34") + assert exc.value[0] == 'prec too big' + + def test_formatting_huge_width(self): + import sys + format_string = "%{}f".format(sys.maxsize + 1) + exc = raises(ValueError, "format_string % 2.34") + assert exc.value[0] == 'width too big' class AppTestWidthPrec: def test_width(self): @@ -324,3 +335,15 @@ def 
test_invalid_char(self): f = 4 raises(ValueError, 'u"%\u1234" % (f,)') + + def test_formatting_huge_precision(self): + import sys + format_string = u"%.{}f".format(sys.maxint + 1) + exc = raises(ValueError, "format_string % 2.34") + assert exc.value[0] == 'prec too big' + + def test_formatting_huge_width(self): + import sys + format_string = u"%{}f".format(sys.maxsize + 1) + exc = raises(ValueError, "format_string % 2.34") + assert exc.value[0] == 'width too big' From noreply at buildbot.pypy.org Sun Mar 2 00:51:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 00:51:49 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: disable ulaw audio tests that depend on unimplemented audioop functions Message-ID: <20140301235149.9183F1C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69587:972f0fa2922a Date: 2014-03-01 18:47 -0500 http://bitbucket.org/pypy/pypy/changeset/972f0fa2922a/ Log: disable ulaw audio tests that depend on unimplemented audioop functions diff --git a/lib-python/2.7/test/test_aifc.py b/lib-python/2.7/test/test_aifc.py --- a/lib-python/2.7/test/test_aifc.py +++ b/lib-python/2.7/test/test_aifc.py @@ -347,7 +347,7 @@ def test_main(): run_unittest(AifcPCM8Test, AifcPCM16Test, AifcPCM16Test, AifcPCM24Test, - AifcPCM32Test, AifcULAWTest, + AifcPCM32Test, #AifcULAWTest, unimpl funcs in lib_pypy/audioop.py AifcMiscTest, AIFCLowLevelTest) if __name__ == "__main__": diff --git a/lib-python/2.7/test/test_sunau.py b/lib-python/2.7/test/test_sunau.py --- a/lib-python/2.7/test/test_sunau.py +++ b/lib-python/2.7/test/test_sunau.py @@ -101,7 +101,7 @@ def test_main(): run_unittest(SunauPCM8Test, SunauPCM16Test, SunauPCM16Test, - SunauPCM32Test, SunauULAWTest) + SunauPCM32Test)#, SunauULAWTest) unimpl funcs in lib_pypy/audioop.py if __name__ == "__main__": test_main() From noreply at buildbot.pypy.org Sun Mar 2 01:19:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 01:19:17 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: reapply pypy random.py modifications Message-ID: <20140302001917.77A141C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69588:cc9aacc860c0 Date: 2014-03-01 19:06 -0500 http://bitbucket.org/pypy/pypy/changeset/cc9aacc860c0/ Log: reapply pypy random.py modifications diff --git a/lib-python/2.7/random.py b/lib-python/2.7/random.py --- a/lib-python/2.7/random.py +++ b/lib-python/2.7/random.py @@ -41,7 +41,6 @@ from __future__ import division from warnings import warn as _warn -from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin from os import urandom as _urandom @@ -239,8 +238,7 @@ return self.randrange(a, b+1) - def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L< n-1 > 2**(k-2) r = getrandbits(k) while r >= n: From noreply at buildbot.pypy.org Sun Mar 2 01:19:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 01:19:18 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix fileio modes Message-ID: <20140302001918.D36401C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69589:d91c3fa0dd7a Date: 2014-03-01 19:18 -0500 http://bitbucket.org/pypy/pypy/changeset/d91c3fa0dd7a/ Log: fix fileio modes diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- 
a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -60,7 +60,6 @@ finally: f.close() - def test_fdopen(self): import os f = self.file(self.temppath, "w") diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -60,8 +60,8 @@ _bad_mode(space) rwa = True writable = True - flags |= O_CREAT append = True + flags |= O_APPEND | O_CREAT elif s == 'b': pass elif s == '+': @@ -85,9 +85,6 @@ flags |= O_BINARY - if append: - flags |= O_APPEND - return readable, writable, append, flags SMALLCHUNK = 8 * 1024 @@ -123,6 +120,7 @@ self.fd = -1 self.readable = False self.writable = False + self.appending = False self.seekable = -1 self.closefd = True self.w_name = None @@ -148,7 +146,7 @@ raise OperationError(space.w_ValueError, space.wrap( "negative file descriptor")) - self.readable, self.writable, append, flags = decode_mode(space, mode) + self.readable, self.writable, self.appending, flags = decode_mode(space, mode) fd_is_own = False try: @@ -181,7 +179,7 @@ self._dircheck(space, w_name) space.setattr(self, space.wrap("name"), w_name) - if append: + if self.appending: # For consistent behaviour, we explicitly seek to the end of file # (otherwise, it might be done only on the first write()). try: @@ -194,7 +192,12 @@ raise def _mode(self): - if self.readable: + if self.appending: + if self.readable: + return 'ab+' + else: + return 'ab' + elif self.readable: if self.writable: return 'rb+' else: diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -1,6 +1,7 @@ from rpython.tool.udir import udir import os + class AppTestFileIO: spaceconfig = dict(usemodules=['_io'] + (['fcntl'] if os.name != 'nt' else [])) @@ -17,7 +18,7 @@ import _io f = _io.FileIO(self.tmpfile, 'a') assert f.name.endswith('tmpfile') - assert f.mode == 'wb' + assert f.mode == 'ab' assert f.closefd is True f.close() @@ -191,6 +192,22 @@ raises(MyException, MyFileIO, fd) os.close(fd) # should not raise OSError(EBADF) + def test_mode_strings(self): + import _io + import os + try: + for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'), + ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'), + ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'), + ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]: + # read modes are last so that TESTFN will exist first + with _io.FileIO(self.tmpfile, modes[0]) as f: + assert f.mode == modes[1] + finally: + if os.path.exists(self.tmpfile): + os.unlink(self.tmpfile) + + def test_flush_at_exit(): from pypy import conftest from pypy.tool.option import make_config, make_objspace @@ -209,6 +226,7 @@ space.finish() assert tmpfile.read() == '42' + def test_flush_at_exit_IOError_and_ValueError(): from pypy import conftest from pypy.tool.option import make_config, make_objspace From noreply at buildbot.pypy.org Sun Mar 2 02:19:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 02:19:54 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix _ssl parsing of nullbytecert Message-ID: <20140302011954.EF30C1C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69590:553b9635e2e5 Date: 2014-03-01 20:17 -0500 http://bitbucket.org/pypy/pypy/changeset/553b9635e2e5/ Log: fix _ssl parsing of nullbytecert diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ 
b/pypy/module/_ssl/interp_ssl.py @@ -578,18 +578,36 @@ # Get a rendering of each name in the set of names name = libssl_sk_GENERAL_NAME_value(names, j) - if intmask(name[0].c_type) == GEN_DIRNAME: - + gntype = intmask(name[0].c_type) + if gntype == GEN_DIRNAME: # we special-case DirName as a tuple of tuples of attributes dirname = libssl_pypy_GENERAL_NAME_dirn(name) w_t = space.newtuple([ space.wrap("DirName"), _create_tuple_for_X509_NAME(space, dirname) ]) + elif gntype in (GEN_EMAIL, GEN_DNS, GEN_URI): + # GENERAL_NAME_print() doesn't handle NULL bytes in ASN1_string + # correctly, CVE-2013-4238 + if gntype == GEN_EMAIL: + v = space.wrap("email") + elif gntype == GEN_DNS: + v = space.wrap("DNS") + elif gntype == GEN_URI: + v = space.wrap("URI") + else: + assert False + as_ = name[0].c_d + buf = libssl_ASN1_STRING_data(as_) + length = libssl_ASN1_STRING_length(as_) + w_t = space.newtuple([v, + space.wrap(rffi.charpsize2str(buf, length))]) else: - # for everything else, we use the OpenSSL print form - + if gntype not in (GEN_OTHERNAME, GEN_X400, GEN_EDIPARTY, + GEN_IPADD, GEN_RID): + space.warn(space.wrap("Unknown general name type"), + space.w_RuntimeWarning) libssl_BIO_reset(biobuf) libssl_GENERAL_NAME_print(biobuf, name) with lltype.scoped_alloc(rffi.CCHARP.TO, 2048) as buf: diff --git a/pypy/module/_ssl/test/nullbytecert.pem b/pypy/module/_ssl/test/nullbytecert.pem new file mode 100644 --- /dev/null +++ b/pypy/module/_ssl/test/nullbytecert.pem @@ -0,0 +1,90 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 0 (0x0) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev at python.org + Validity + Not Before: Aug 7 13:11:52 2013 GMT + Not After : Aug 7 13:12:52 2013 GMT + Subject: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev at python.org + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b5:ea:ed:c9:fb:46:7d:6f:3b:76:80:dd:3a:f3: + 03:94:0b:a7:a6:db:ec:1d:df:ff:23:74:08:9d:97: + 16:3f:a3:a4:7b:3e:1b:0e:96:59:25:03:a7:26:e2: + 88:a9:cf:79:cd:f7:04:56:b0:ab:79:32:6e:59:c1: + 32:30:54:eb:58:a8:cb:91:f0:42:a5:64:27:cb:d4: + 56:31:88:52:ad:cf:bd:7f:f0:06:64:1f:cc:27:b8: + a3:8b:8c:f3:d8:29:1f:25:0b:f5:46:06:1b:ca:02: + 45:ad:7b:76:0a:9c:bf:bb:b9:ae:0d:16:ab:60:75: + ae:06:3e:9c:7c:31:dc:92:2f:29:1a:e0:4b:0c:91: + 90:6c:e9:37:c5:90:d7:2a:d7:97:15:a3:80:8f:5d: + 7b:49:8f:54:30:d4:97:2c:1c:5b:37:b5:ab:69:30: + 68:43:d3:33:78:4b:02:60:f5:3c:44:80:a1:8f:e7: + f0:0f:d1:5e:87:9e:46:cf:62:fc:f9:bf:0c:65:12: + f1:93:c8:35:79:3f:c8:ec:ec:47:f5:ef:be:44:d5: + ae:82:1e:2d:9a:9f:98:5a:67:65:e1:74:70:7c:cb: + d3:c2:ce:0e:45:49:27:dc:e3:2d:d4:fb:48:0e:2f: + 9e:77:b8:14:46:c0:c4:36:ca:02:ae:6a:91:8c:da: + 2f:85 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Subject Key Identifier: + 88:5A:55:C0:52:FF:61:CD:52:A3:35:0F:EA:5A:9C:24:38:22:F7:5C + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Subject Alternative Name: + ************************************************************* + WARNING: The values for DNS, email and URI are WRONG. OpenSSL + doesn't print the text after a NULL byte. 
+ ************************************************************* + DNS:altnull.python.org, email:null at python.org, URI:http://null.python.org, IP Address:192.0.2.1, IP Address:2001:DB8:0:0:0:0:0:1 + Signature Algorithm: sha1WithRSAEncryption + ac:4f:45:ef:7d:49:a8:21:70:8e:88:59:3e:d4:36:42:70:f5: + a3:bd:8b:d7:a8:d0:58:f6:31:4a:b1:a4:a6:dd:6f:d9:e8:44: + 3c:b6:0a:71:d6:7f:b1:08:61:9d:60:ce:75:cf:77:0c:d2:37: + 86:02:8d:5e:5d:f9:0f:71:b4:16:a8:c1:3d:23:1c:f1:11:b3: + 56:6e:ca:d0:8d:34:94:e6:87:2a:99:f2:ae:ae:cc:c2:e8:86: + de:08:a8:7f:c5:05:fa:6f:81:a7:82:e6:d0:53:9d:34:f4:ac: + 3e:40:fe:89:57:7a:29:a4:91:7e:0b:c6:51:31:e5:10:2f:a4: + 60:76:cd:95:51:1a:be:8b:a1:b0:fd:ad:52:bd:d7:1b:87:60: + d2:31:c7:17:c4:18:4f:2d:08:25:a3:a7:4f:b7:92:ca:e2:f5: + 25:f1:54:75:81:9d:b3:3d:61:a2:f7:da:ed:e1:c6:6f:2c:60: + 1f:d8:6f:c5:92:05:ab:c9:09:62:49:a9:14:ad:55:11:cc:d6: + 4a:19:94:99:97:37:1d:81:5f:8b:cf:a3:a8:96:44:51:08:3d: + 0b:05:65:12:eb:b6:70:80:88:48:72:4f:c6:c2:da:cf:cd:8e: + 5b:ba:97:2f:60:b4:96:56:49:5e:3a:43:76:63:04:be:2a:f6: + c1:ca:a9:94 +-----BEGIN CERTIFICATE----- +MIIE2DCCA8CgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBxTELMAkGA1UEBhMCVVMx +DzANBgNVBAgMBk9yZWdvbjESMBAGA1UEBwwJQmVhdmVydG9uMSMwIQYDVQQKDBpQ +eXRob24gU29mdHdhcmUgRm91bmRhdGlvbjEgMB4GA1UECwwXUHl0aG9uIENvcmUg +RGV2ZWxvcG1lbnQxJDAiBgNVBAMMG251bGwucHl0aG9uLm9yZwBleGFtcGxlLm9y +ZzEkMCIGCSqGSIb3DQEJARYVcHl0aG9uLWRldkBweXRob24ub3JnMB4XDTEzMDgw +NzEzMTE1MloXDTEzMDgwNzEzMTI1MlowgcUxCzAJBgNVBAYTAlVTMQ8wDQYDVQQI +DAZPcmVnb24xEjAQBgNVBAcMCUJlYXZlcnRvbjEjMCEGA1UECgwaUHl0aG9uIFNv +ZnR3YXJlIEZvdW5kYXRpb24xIDAeBgNVBAsMF1B5dGhvbiBDb3JlIERldmVsb3Bt +ZW50MSQwIgYDVQQDDBtudWxsLnB5dGhvbi5vcmcAZXhhbXBsZS5vcmcxJDAiBgkq +hkiG9w0BCQEWFXB5dGhvbi1kZXZAcHl0aG9uLm9yZzCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBALXq7cn7Rn1vO3aA3TrzA5QLp6bb7B3f/yN0CJ2XFj+j +pHs+Gw6WWSUDpybiiKnPec33BFawq3kyblnBMjBU61ioy5HwQqVkJ8vUVjGIUq3P +vX/wBmQfzCe4o4uM89gpHyUL9UYGG8oCRa17dgqcv7u5rg0Wq2B1rgY+nHwx3JIv +KRrgSwyRkGzpN8WQ1yrXlxWjgI9de0mPVDDUlywcWze1q2kwaEPTM3hLAmD1PESA +oY/n8A/RXoeeRs9i/Pm/DGUS8ZPINXk/yOzsR/XvvkTVroIeLZqfmFpnZeF0cHzL +08LODkVJJ9zjLdT7SA4vnne4FEbAxDbKAq5qkYzaL4UCAwEAAaOB0DCBzTAMBgNV +HRMBAf8EAjAAMB0GA1UdDgQWBBSIWlXAUv9hzVKjNQ/qWpwkOCL3XDALBgNVHQ8E +BAMCBeAwgZAGA1UdEQSBiDCBhYIeYWx0bnVsbC5weXRob24ub3JnAGV4YW1wbGUu +Y29tgSBudWxsQHB5dGhvbi5vcmcAdXNlckBleGFtcGxlLm9yZ4YpaHR0cDovL251 +bGwucHl0aG9uLm9yZwBodHRwOi8vZXhhbXBsZS5vcmeHBMAAAgGHECABDbgAAAAA +AAAAAAAAAAEwDQYJKoZIhvcNAQEFBQADggEBAKxPRe99SaghcI6IWT7UNkJw9aO9 +i9eo0Fj2MUqxpKbdb9noRDy2CnHWf7EIYZ1gznXPdwzSN4YCjV5d+Q9xtBaowT0j +HPERs1ZuytCNNJTmhyqZ8q6uzMLoht4IqH/FBfpvgaeC5tBTnTT0rD5A/olXeimk +kX4LxlEx5RAvpGB2zZVRGr6LobD9rVK91xuHYNIxxxfEGE8tCCWjp0+3ksri9SXx +VHWBnbM9YaL32u3hxm8sYB/Yb8WSBavJCWJJqRStVRHM1koZlJmXNx2BX4vPo6iW +RFEIPQsFZRLrtnCAiEhyT8bC2s/Njlu6ly9gtJZWSV46Q3ZjBL4q9sHKqZQ= +-----END CERTIFICATE----- diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,9 +1,14 @@ class AppTestSSL: spaceconfig = dict(usemodules=('_ssl', '_socket')) + def setup_class(cls): + import os + cls.w_nullbytecert = cls.space.wrap(os.path.join( + os.path.dirname(__file__), 'nullbytecert.pem')) + def test_init_module(self): import _ssl - + def test_sslerror(self): import _ssl, _socket assert issubclass(_ssl.SSLError, Exception) @@ -12,7 +17,7 @@ def test_constants(self): import _ssl - + assert isinstance(_ssl.SSL_ERROR_ZERO_RETURN, int) assert isinstance(_ssl.SSL_ERROR_WANT_READ, int) assert isinstance(_ssl.SSL_ERROR_WANT_WRITE, 
int) @@ -27,7 +32,7 @@ assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) assert 'openssl' in _ssl.OPENSSL_VERSION.lower() - + def test_RAND_add(self): import _ssl if not hasattr(_ssl, "RAND_add"): @@ -35,13 +40,13 @@ raises(TypeError, _ssl.RAND_add, 4, 4) raises(TypeError, _ssl.RAND_add, "xyz", "zyx") _ssl.RAND_add("xyz", 1.2345) - + def test_RAND_status(self): import _ssl if not hasattr(_ssl, "RAND_status"): skip("RAND_status is not available on this machine") _ssl.RAND_status() - + def test_RAND_egd(self): import _ssl, os, stat if not hasattr(_ssl, "RAND_egd"): @@ -80,6 +85,25 @@ del exc, ss, s gc.collect() # force the destructor() to be called now + def test_test_decode_nullbytecert(self): + import _ssl + p = _ssl._test_decode_cert(self.nullbytecert) + subject = ((('countryName', 'US'),), + (('stateOrProvinceName', 'Oregon'),), + (('localityName', 'Beaverton'),), + (('organizationName', 'Python Software Foundation'),), + (('organizationalUnitName', 'Python Core Development'),), + (('commonName', 'null.python.org\x00example.org'),), + (('emailAddress', 'python-dev at python.org'),)) + assert p['subject'] == subject + assert p['issuer'] == subject + assert p['subjectAltName'] == \ + (('DNS', 'altnull.python.org\x00example.com'), + ('email', 'null at python.org\x00user at example.org'), + ('URI', 'http://null.python.org\x00http://example.org'), + ('IP Address', '192.0.2.1'), + ('IP Address', '2001:DB8:0:0:0:0:0:1\n')) + class AppTestConnectedSSL: spaceconfig = { diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -105,6 +105,14 @@ NID_subject_alt_name = rffi_platform.ConstantInteger("NID_subject_alt_name") GEN_DIRNAME = rffi_platform.ConstantInteger("GEN_DIRNAME") + GEN_EMAIL = rffi_platform.ConstantInteger("GEN_EMAIL") + GEN_DNS = rffi_platform.ConstantInteger("GEN_DNS") + GEN_URI = rffi_platform.ConstantInteger("GEN_URI") + GEN_OTHERNAME = rffi_platform.ConstantInteger("GEN_OTHERNAME") + GEN_X400 = rffi_platform.ConstantInteger("GEN_X400") + GEN_EDIPARTY = rffi_platform.ConstantInteger("GEN_EDIPARTY") + GEN_IPADD = rffi_platform.ConstantInteger("GEN_IPADD") + GEN_RID = rffi_platform.ConstantInteger("GEN_RID") CRYPTO_LOCK = rffi_platform.ConstantInteger("CRYPTO_LOCK") @@ -129,7 +137,7 @@ GENERAL_NAME_st = rffi_platform.Struct( 'struct GENERAL_NAME_st', [('type', rffi.INT), - ]) + ('d', ASN1_STRING)]) EVP_MD_st = rffi_platform.Struct( 'EVP_MD', [('md_size', rffi.INT), @@ -260,6 +268,8 @@ ssl_external('OBJ_obj2txt', [rffi.CCHARP, rffi.INT, ASN1_OBJECT, rffi.INT], rffi.INT) +ssl_external('ASN1_STRING_data', [ASN1_STRING], rffi.CCHARP) +ssl_external('ASN1_STRING_length', [ASN1_STRING], rffi.INT) ssl_external('ASN1_STRING_to_UTF8', [rffi.CCHARPP, ASN1_STRING], rffi.INT) ssl_external('ASN1_TIME_print', [BIO, ASN1_TIME], rffi.INT) ssl_external('i2a_ASN1_INTEGER', [BIO, ASN1_INTEGER], rffi.INT) From noreply at buildbot.pypy.org Sun Mar 2 02:42:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 2 Mar 2014 02:42:07 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: cpython issue13612: avoid crashing in the case of multibyte encodings. Message-ID: <20140302014207.ECB681C02C1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.6 Changeset: r69591:a4e6cfbf8f11 Date: 2014-03-01 17:41 -0800 http://bitbucket.org/pypy/pypy/changeset/a4e6cfbf8f11/ Log: cpython issue13612: avoid crashing in the case of multibyte encodings. 
also, fix a thinko in UnknownEncodingHandlerData_callback diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -336,7 +336,7 @@ try: parser.UnknownEncodingHandler(space, name, info) except OperationError, e: - if parser._exc_info: + if not parser._exc_info: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) result = 0 @@ -584,6 +584,10 @@ space.wrap(self.all_chars), "decode", space.wrap(name), space.wrap("replace"))) + if len(translationmap) != 256: + raise oefmt(space.w_ValueError, + "multi-byte encodings are not supported") + for i in range(256): c = translationmap[i] if c == u'\ufffd': diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -2,7 +2,7 @@ from pytest import skip class AppTestPyexpat: - spaceconfig = dict(usemodules=['pyexpat']) + spaceconfig = dict(usemodules=['pyexpat', '_multibytecodec']) def teardown_class(cls): global_storage.clear() @@ -109,6 +109,13 @@ p.CharacterDataHandler = gotText p.Parse(xml) + def test_mbcs(self): + xml = "
" + import pyexpat + p = pyexpat.ParserCreate() + exc = raises(ValueError, p.Parse, xml) + assert str(exc.value) == "multi-byte encodings are not supported" + def test_decode_error(self): xml = 'Comment \xe7a va ? Tr\xe8s bien ?' import pyexpat From noreply at buildbot.pypy.org Sun Mar 2 02:45:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 02:45:35 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix compile error message case Message-ID: <20140302014535.25A9F1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69592:2dd986a2c9d0 Date: 2014-03-01 20:40 -0500 http://bitbucket.org/pypy/pypy/changeset/2dd986a2c9d0/ Log: fix compile error message case diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py --- a/pypy/interpreter/pyparser/pyparse.py +++ b/pypy/interpreter/pyparser/pyparse.py @@ -108,7 +108,7 @@ # If an encoding is explicitly given check that it is utf-8. decl_enc = _check_for_encoding(textsrc) if decl_enc and decl_enc != "utf-8": - raise error.SyntaxError("UTF-8 BOM with non-utf8 coding cookie", + raise error.SyntaxError("UTF-8 BOM with %s coding cookie" % decl_enc, filename=compile_info.filename) elif compile_info.flags & consts.PyCF_SOURCE_IS_UTF8: enc = 'utf-8' diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py --- a/pypy/interpreter/pyparser/test/test_pyparse.py +++ b/pypy/interpreter/pyparser/test/test_pyparse.py @@ -55,7 +55,7 @@ assert exc.msg == "coding declaration in unicode string" input = "\xEF\xBB\xBF# coding: latin-1\nx" exc = py.test.raises(SyntaxError, self.parse, input).value - assert exc.msg == "UTF-8 BOM with non-utf8 coding cookie" + assert exc.msg == "UTF-8 BOM with latin-1 coding cookie" input = "# coding: not-here" exc = py.test.raises(SyntaxError, self.parse, input).value assert exc.msg == "Unknown encoding: not-here" diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -486,6 +486,23 @@ raises(ValueError, compile, "\n", "", "exec", 0xff) raises(TypeError, compile, '1+2', 12, 34) + def test_compile_error_message(self): + import re + compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') + compile('\xef\xbb\xbf\n', 'dummy', 'exec') + compile('\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec') + exc = raises(SyntaxError, compile, + '# -*- coding: fake -*-\n', 'dummy', 'exec') + assert 'fake' in exc.value[0] + exc = raises(SyntaxError, compile, + '\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') + assert 'iso-8859-15' in exc.value[0] + assert 'BOM' in exc.value[0] + exc = raises(SyntaxError, compile, + '\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') + assert 'fake' in exc.value[0] + assert 'BOM' in exc.value[0] + def test_unicode_compile(self): try: compile(u'-', '?', 'eval') From noreply at buildbot.pypy.org Sun Mar 2 02:45:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 02:45:36 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: merge heads Message-ID: <20140302014536.729401C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69593:ae3ed95563a4 Date: 2014-03-01 20:44 -0500 http://bitbucket.org/pypy/pypy/changeset/ae3ed95563a4/ Log: merge heads diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- 
a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -336,7 +336,7 @@ try: parser.UnknownEncodingHandler(space, name, info) except OperationError, e: - if parser._exc_info: + if not parser._exc_info: parser._exc_info = e XML_StopParser(parser.itself, XML_FALSE) result = 0 @@ -584,6 +584,10 @@ space.wrap(self.all_chars), "decode", space.wrap(name), space.wrap("replace"))) + if len(translationmap) != 256: + raise oefmt(space.w_ValueError, + "multi-byte encodings are not supported") + for i in range(256): c = translationmap[i] if c == u'\ufffd': diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -2,7 +2,7 @@ from pytest import skip class AppTestPyexpat: - spaceconfig = dict(usemodules=['pyexpat']) + spaceconfig = dict(usemodules=['pyexpat', '_multibytecodec']) def teardown_class(cls): global_storage.clear() @@ -109,6 +109,13 @@ p.CharacterDataHandler = gotText p.Parse(xml) + def test_mbcs(self): + xml = "
" + import pyexpat + p = pyexpat.ParserCreate() + exc = raises(ValueError, p.Parse, xml) + assert str(exc.value) == "multi-byte encodings are not supported" + def test_decode_error(self): xml = 'Comment \xe7a va ? Tr\xe8s bien ?' import pyexpat From noreply at buildbot.pypy.org Sun Mar 2 03:28:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 03:28:35 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: precision limit is actually INT_MAX Message-ID: <20140302022835.AADF31C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69594:64b7e6ad4753 Date: 2014-03-01 21:27 -0500 http://bitbucket.org/pypy/pypy/changeset/64b7e6ad4753/ Log: precision limit is actually INT_MAX diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -4,7 +4,6 @@ import sys from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit -from rpython.rlib.rarithmetic import ovfcheck from rpython.rlib.rfloat import formatd, DTSF_ALT, isnan, isinf from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.unroll import unrolling_iterable @@ -226,7 +225,7 @@ if self.peekchr() == '.': self.forward() - self.prec = self.peel_num('prec', sys.maxint) + self.prec = self.peel_num('prec', 2**31 - 1) if self.prec < 0: self.prec = 0 # this can happen: '%.*f' % (-5, 3) else: diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -205,8 +205,7 @@ assert "%x" % IntFails() == '0' def test_formatting_huge_precision(self): - import sys - format_string = "%.{}f".format(sys.maxint + 1) + format_string = "%.{}f".format(2**31) exc = raises(ValueError, "format_string % 2.34") assert exc.value[0] == 'prec too big' @@ -337,8 +336,7 @@ raises(ValueError, 'u"%\u1234" % (f,)') def test_formatting_huge_precision(self): - import sys - format_string = u"%.{}f".format(sys.maxint + 1) + format_string = u"%.{}f".format(2**31) exc = raises(ValueError, "format_string % 2.34") assert exc.value[0] == 'prec too big' From noreply at buildbot.pypy.org Sun Mar 2 03:45:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 03:45:31 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: make this a constant in rarithmetic Message-ID: <20140302024531.91FFB1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69595:2ca330e1a022 Date: 2014-03-01 21:43 -0500 http://bitbucket.org/pypy/pypy/changeset/2ca330e1a022/ Log: make this a constant in rarithmetic diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -7,6 +7,7 @@ from rpython.rlib.rfloat import formatd, DTSF_ALT, isnan, isinf from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.unroll import unrolling_iterable +from rpython.rlib.rarithmetic import INT_MAX from rpython.tool.sourcetools import func_with_new_name @@ -225,7 +226,7 @@ if self.peekchr() == '.': self.forward() - self.prec = self.peel_num('prec', 2**31 - 1) + self.prec = self.peel_num('prec', INT_MAX) if self.prec < 0: self.prec = 0 # this can happen: '%.*f' % (-5, 3) else: diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ 
-75,6 +75,8 @@ # to handle the win64 special case: is_emulated_long = _long_typecode != 'l' +INT_MAX = 2**(_get_bitsize('i') - 1) - 1 + LONG_BIT = _get_long_bit() LONG_MASK = (2**LONG_BIT)-1 LONG_TEST = 2**(LONG_BIT-1) From noreply at buildbot.pypy.org Sun Mar 2 05:03:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 05:03:42 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix tcl split/splitlist Message-ID: <20140302040342.1AACA1C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69596:97f50d229f4d Date: 2014-03-01 22:22 -0500 http://bitbucket.org/pypy/pypy/changeset/97f50d229f4d/ Log: fix tcl split/splitlist diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -334,15 +334,40 @@ return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def split(self, arg): - if isinstance(arg, tuple): + if isinstance(arg, TclObject): + objc = tkffi.new("int*") + objv = tkffi.new("Tcl_Obj***") + status = tklib.Tcl_ListObjGetElements(self.interp, arg._value, objc, objv) + if status == tklib.TCL_ERROR: + return FromObj(self, arg._value) + if objc == 0: + return '' + elif objc == 1: + return FromObj(self, objv[0][0]) + result = [] + for i in range(objc[0]): + result.append(FromObj(self, objv[0][i])) + return tuple(result) + elif isinstance(arg, tuple): return self._splitObj(arg) - else: - return self._split(arg) + elif isinstance(arg, unicode): + arg = arg.encode('utf8') + return self._split(arg) def splitlist(self, arg): - if isinstance(arg, tuple): + if isinstance(arg, TclObject): + objc = tkffi.new("int*") + objv = tkffi.new("Tcl_Obj***") + status = tklib.Tcl_ListObjGetElements(self.interp, arg._value, objc, objv) + if status == tklib.TCL_ERROR: + self.raiseTclError() + result = [] + for i in range(objc[0]): + result.append(FromObj(self, objv[0][i])) + return tuple(result) + elif isinstance(arg, tuple): return arg - if isinstance(arg, unicode): + elif isinstance(arg, unicode): arg = arg.encode('utf8') argc = tkffi.new("int*") @@ -359,23 +384,34 @@ def _splitObj(self, arg): if isinstance(arg, tuple): size = len(arg) + result = None # Recursively invoke SplitObj for all tuple items. # If this does not return a new object, no action is # needed. 
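For context: the split()/splitlist() rework in this commit is meant to match the semantics of CPython's Tkapp methods -- splitlist() splits one level only, while split() recurses into braced sublists. A rough sketch of the intended behaviour (assumes a working Tcl/Tk installation and display; not part of the patch):

    import Tkinter                        # Python 2 module name
    tk = Tkinter.Tk().tk                  # the underlying tkapp object
    print(tk.splitlist('a b {c d}'))      # ('a', 'b', 'c d')
    print(tk.split('a b {c d}'))          # ('a', 'b', ('c', 'd'))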
- result = None - newelems = (self._splitObj(elem) for elem in arg) - for elem, newelem in zip(arg, newelems): - if elem is not newelem: - return newelems - elif isinstance(arg, str): + for i in range(size): + elem = arg[i] + newelem = self._splitObj(elem) + if result is None: + if newelem == elem: + continue + result = [None] * size + for k in range(i): + result[k] = arg[k] + result[i] = newelem + if result is not None: + return tuple(result) + elif isinstance(arg, basestring): argc = tkffi.new("int*") argv = tkffi.new("char***") - res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) - if res == tklib.TCL_ERROR: + if isinstance(arg, unicode): + arg = arg.encode('utf-8') + list_ = str(arg) + res = tklib.Tcl_SplitList(tkffi.NULL, list_, argc, argv) + if res != tklib.TCL_OK: return arg tklib.Tcl_Free(argv[0]) if argc[0] > 1: - return self._split(arg) + return self._split(list_) return arg def _split(self, arg): @@ -392,10 +428,10 @@ if argc[0] == 0: return "" elif argc[0] == 1: - return argv[0][0] + return tkffi.string(argv[0][0]) else: - return (self._split(argv[0][i]) - for i in range(argc[0])) + return tuple(self._split(argv[0][i]) + for i in range(argc[0])) finally: tklib.Tcl_Free(argv[0]) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -78,6 +78,7 @@ int Tcl_GetCharLength(Tcl_Obj* objPtr); Tcl_Obj *Tcl_NewListObj(int objc, Tcl_Obj* const objv[]); +int Tcl_ListObjGetElements(Tcl_Interp *interp, Tcl_Obj *listPtr, int *objcPtr, Tcl_Obj ***objvPtr); int Tcl_ListObjLength(Tcl_Interp* interp, Tcl_Obj* listPtr, int* intPtr); int Tcl_ListObjIndex(Tcl_Interp* interp, Tcl_Obj* listPtr, int index, Tcl_Obj** objPtrPtr); int Tcl_SplitList(Tcl_Interp* interp, char* list, int* argcPtr, const char*** argvPtr); From noreply at buildbot.pypy.org Sun Mar 2 05:19:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 05:19:31 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: sys.maxsize -> sys.maxint Message-ID: <20140302041931.0B0BC1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69597:f63389a764ea Date: 2014-03-01 23:18 -0500 http://bitbucket.org/pypy/pypy/changeset/f63389a764ea/ Log: sys.maxsize -> sys.maxint diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -218,7 +218,7 @@ self.peel_flags() - self.width = self.peel_num('width', sys.maxsize) + self.width = self.peel_num('width', sys.maxint) if self.width < 0: # this can happen: '%*s' % (-5, "hi") self.f_ljust = True diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -21,7 +21,7 @@ while i < end: digit = ord(s[i]) - ord('0') if 0 <= digit <= 9: - if result > (sys.maxsize - digit) / 10: + if result > (sys.maxint - digit) / 10: raise oefmt(space.w_ValueError, "too many decimal digits in format string") result = result * 10 + digit From noreply at buildbot.pypy.org Sun Mar 2 06:44:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 06:44:30 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: try this method Message-ID: <20140302054430.07DE41C025B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69598:f9318d0034e1 Date: 2014-03-02 00:43 -0500 http://bitbucket.org/pypy/pypy/changeset/f9318d0034e1/ Log: try this method diff --git 
a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -597,7 +597,7 @@ v = space.wrap("URI") else: assert False - as_ = name[0].c_d + as_ = rffi.cast(ASN1_STRING, name[0].c_d) buf = libssl_ASN1_STRING_data(as_) length = libssl_ASN1_STRING_length(as_) w_t = space.newtuple([v, diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -137,7 +137,7 @@ GENERAL_NAME_st = rffi_platform.Struct( 'struct GENERAL_NAME_st', [('type', rffi.INT), - ('d', ASN1_STRING)]) + ('d', rffi.VOIDP)]) EVP_MD_st = rffi_platform.Struct( 'EVP_MD', [('md_size', rffi.INT), From noreply at buildbot.pypy.org Sun Mar 2 08:03:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 08:03:58 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix incremental utf-16 decoder (cpython issue11461) Message-ID: <20140302070358.D8AFE1C3373@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69599:62fa89efe2e7 Date: 2014-03-02 02:02 -0500 http://bitbucket.org/pypy/pypy/changeset/62fa89efe2e7/ Log: fix incremental utf-16 decoder (cpython issue11461) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -134,11 +134,15 @@ u"\x00\xff\u07ff\u0800", u"\x00\xff\u07ff\u0800", u"\x00\xff\u07ff\u0800\uffff", + u"\x00\xff\u07ff\u0800\uffff", + u"\x00\xff\u07ff\u0800\uffff", + u"\x00\xff\u07ff\u0800\uffff", + u"\x00\xff\u07ff\u0800\uffff\U00010000", ] buffer = '' result = u"" - for (c, partialresult) in zip(u"\x00\xff\u07ff\u0800\uffff".encode(encoding), check_partial): + for (c, partialresult) in zip(u"\x00\xff\u07ff\u0800\uffff\U00010000".encode(encoding), check_partial): buffer += c res = _codecs.utf_8_decode(buffer,'strict',False) if res[1] >0 : @@ -160,10 +164,14 @@ u"\x00\xff\u0100", u"\x00\xff\u0100", u"\x00\xff\u0100\uffff", + u"\x00\xff\u0100\uffff", + u"\x00\xff\u0100\uffff", + u"\x00\xff\u0100\uffff", + u"\x00\xff\u0100\uffff\U00010000", ] buffer = '' result = u"" - for (c, partialresult) in zip(u"\x00\xff\u0100\uffff".encode(encoding), check_partial): + for (c, partialresult) in zip(u"\x00\xff\u0100\uffff\U00010000".encode(encoding), check_partial): buffer += c res = _codecs.utf_16_decode(buffer,'strict',False) if res[1] >0 : diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -1,8 +1,8 @@ import py import sys + class TestUnicodeObject: - def test_comparison_warning(self): warnings = [] def my_warn(msg, warningscls): @@ -32,6 +32,7 @@ space.w_unicode, "__new__", space.w_unicode, w_uni) assert w_new is w_uni + class AppTestUnicodeStringStdOnly: def test_compares(self): assert u'a' == 'a' @@ -314,7 +315,6 @@ assert u'xyzzyhelloxyzzy'.lstrip('xyz') == u'helloxyzzy' assert u'xyzzyhelloxyzzy'.rstrip(u'xyz') == u'xyzzyhello' - def test_long_from_unicode(self): assert long(u'12345678901234567890') == 12345678901234567890 assert int(u'12345678901234567890') == 12345678901234567890 @@ -336,7 +336,7 @@ u'a', u'"', u'\'', u'\"', u'\t', u'\\', u"'''\"", unichr(19), unichr(2), u'\u1234', u'\U00101234']: assert eval(repr(ustr)) == ustr - + def test_getnewargs(self): class X(unicode): pass @@ -400,7 +400,7 @@ assert not 
'hello'.endswith((u'he\u1111', u'he')) assert 'hello'.endswith((u'\u1111lo', u'llo')) assert 'hello'.endswith((u'\u1111hellox', u'hello')) - + def test_endswith(self): assert u'ab'.endswith(u'ab') is True assert u'ab'.endswith(u'b') is True @@ -441,13 +441,13 @@ s = u'xy\t' assert s.expandtabs() =='xy ' - + s = u'\txy\t' assert s.expandtabs() ==' xy ' assert s.expandtabs(1) ==' xy ' assert s.expandtabs(2) ==' xy ' assert s.expandtabs(3) ==' xy ' - + assert u'xy'.expandtabs() =='xy' assert u''.expandtabs() =='' @@ -456,7 +456,7 @@ if sys.maxint > (1 << 32): skip("Wrong platform") raises((OverflowError, MemoryError), u't\tt\t'.expandtabs, sys.maxint) - + def test_translate(self): assert u'bbbc' == u'abababc'.translate({ord('a'):None}) assert u'iiic' == u'abababc'.translate({ord('a'):None, ord('b'):ord('i')}) @@ -473,7 +473,7 @@ def test_unicode_form_encoded_object(self): assert unicode('x', 'utf-8') == u'x' assert unicode('x', 'utf-8', 'strict') == u'x' - + def test_unicode_startswith_tuple(self): assert u'xxx'.startswith(('x', 'y', 'z'), 0) assert u'xxx'.endswith(('x', 'y', 'z'), 0) @@ -572,7 +572,6 @@ def test_partition(self): - assert (u'this is the par', u'ti', u'tion method') == \ u'this is the partition method'.partition(u'ti') @@ -587,7 +586,6 @@ raises(TypeError, S.partition, None) def test_rpartition(self): - assert (u'this is the rparti', u'ti', u'on method') == \ u'this is the rpartition method'.rpartition(u'ti') @@ -601,7 +599,6 @@ raises(ValueError, S.rpartition, u'') raises(TypeError, S.rpartition, None) - def test_mul(self): zero = 0 assert type(u'' * zero) == type(zero * u'') == unicode @@ -730,7 +727,7 @@ return X("stuff") assert unicode(Y()).__class__ is X - + def test_getslice(self): assert u'123456'.__getslice__(1, 5) == u'2345' s = u"abc" @@ -827,7 +824,7 @@ def __unicode__(self): return u'bar' - + a = A() b = B() s = '%s %s' % (a, b) diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -453,10 +453,11 @@ continue # UTF-16 code pair: if len(s) - pos < 2: + pos -= 2 if not final: break errmsg = "unexpected end of data" - r, pos = errorhandler(errors, 'utf16', errmsg, s, pos - 2, len(s)) + r, pos = errorhandler(errors, 'utf16', errmsg, s, pos, len(s)) result.append(r) if len(s) - pos < 2: break diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -4,6 +4,7 @@ import sys, random from rpython.rlib import runicode + def test_unichr(): assert runicode.UNICHR(0xffff) == u'\uffff' if runicode.MAXUNICODE > 0xffff: @@ -15,6 +16,7 @@ py.test.raises(ValueError, runicode.UNICHR, 0x10000) py.test.raises(TypeError, runicode.UNICHR, 'abc') + def test_ord(): assert runicode.ORD('a') == 97 assert runicode.ORD(u'a') == 97 @@ -118,7 +120,6 @@ class TestDecoding(UnicodeTests): - # XXX test bom recognition in utf-16 # XXX test proper error handling @@ -552,7 +553,6 @@ self.checkdecodeerror(s, "utf-8", 0, 3, addstuff=True, msg='invalid continuation byte') - def test_issue8271(self): # From CPython # Issue #8271: during the decoding of an invalid UTF-8 byte sequence, @@ -648,6 +648,7 @@ assert decoder(seq, len(seq), 'ignore', final=True ) == (res, len(seq)) + class TestEncoding(UnicodeTests): def test_all_ascii(self): for i in range(128): @@ -759,6 +760,7 @@ py.test.raises(UnicodeEncodeError, encoder, u' 12, \u1234 ', 7, None) assert encoder(u'u\u1234', 2, 'replace') == 'u?' 
+ class TestTranslation(object): def setup_class(cls): if runicode.MAXUNICODE != sys.maxunicode: From noreply at buildbot.pypy.org Sun Mar 2 08:19:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 08:19:45 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix utf-7 decoder (cpython issue19279) Message-ID: <20140302071945.624AE1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69600:5a23ee926e6c Date: 2014-03-02 02:19 -0500 http://bitbucket.org/pypy/pypy/changeset/5a23ee926e6c/ Log: fix utf-7 decoder (cpython issue19279) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -591,6 +591,30 @@ def test_utf7_surrogate(self): assert '+3ADYAA-'.decode('utf-7') == u'\udc00\ud800' + def test_utf7_errors(self): + import codecs + tests = [ + ('a\xffb', u'a\ufffdb'), + ('a+IK', u'a\ufffd'), + ('a+IK-b', u'a\ufffdb'), + ('a+IK,b', u'a\ufffdb'), + ('a+IKx', u'a\u20ac\ufffd'), + ('a+IKx-b', u'a\u20ac\ufffdb'), + ('a+IKwgr', u'a\u20ac\ufffd'), + ('a+IKwgr-b', u'a\u20ac\ufffdb'), + ('a+IKwgr,', u'a\u20ac\ufffd'), + ('a+IKwgr,-b', u'a\u20ac\ufffd-b'), + ('a+IKwgrB', u'a\u20ac\u20ac\ufffd'), + ('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), + ('a+/,+IKw-b', u'a\ufffd\u20acb'), + ('a+//,+IKw-b', u'a\ufffd\u20acb'), + ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), + ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), + ] + for raw, expected in tests: + raises(UnicodeDecodeError, codecs.utf_7_decode, raw, 'strict', True) + assert raw.decode('utf-7', 'replace') == expected + def test_utf_16_encode_decode(self): import codecs, sys x = u'123abc' @@ -605,7 +629,7 @@ assert codecs.getdecoder('utf-16')( '\xff\xfe1\x002\x003\x00a\x00b\x00c\x00') == (x, 14) - def test_unicode_escape(self): + def test_unicode_escape(self): assert u'\\'.encode('unicode-escape') == '\\\\' assert '\\\\'.decode('unicode-escape') == u'\\' assert u'\ud801'.encode('unicode-escape') == '\\ud801' diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -812,6 +812,7 @@ outCh = base64buffer >> (base64bits - 16) base64bits -= 16 base64buffer &= (1 << base64bits) - 1 # clear high bits + assert outCh <= 0xffff if surrogate: # expecting a second surrogate if outCh >= 0xDC00 and outCh <= 0xDFFFF: @@ -877,6 +878,8 @@ else: # begin base64-encoded section inShift = 1 shiftOutStartPos = pos - 1 + base64bits = 0 + base64buffer = 0 elif _utf7_DECODE_DIRECT(oc): # character decodes at itself result.append(unichr(oc)) From noreply at buildbot.pypy.org Sun Mar 2 09:03:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 09:03:26 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: another try at the stubborn ssl rffi usage Message-ID: <20140302080326.B61521C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69601:607fcb7dfabe Date: 2014-03-02 03:02 -0500 http://bitbucket.org/pypy/pypy/changeset/607fcb7dfabe/ Log: another try at the stubborn ssl rffi usage diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -597,7 +597,8 @@ v = space.wrap("URI") else: assert False - as_ = rffi.cast(ASN1_STRING, name[0].c_d) + as_ = libssl_pypy_GENERAL_NAME_dirn(name) + as_ = rffi.cast(ASN1_STRING, as_) buf = libssl_ASN1_STRING_data(as_) length = 
libssl_ASN1_STRING_length(as_) w_t = space.newtuple([v, diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -136,8 +136,7 @@ ('d2i', lltype.Ptr(X509V3_EXT_D2I))]) GENERAL_NAME_st = rffi_platform.Struct( 'struct GENERAL_NAME_st', - [('type', rffi.INT), - ('d', rffi.VOIDP)]) + [('type', rffi.INT)]) EVP_MD_st = rffi_platform.Struct( 'EVP_MD', [('md_size', rffi.INT), From noreply at buildbot.pypy.org Sun Mar 2 09:19:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 09:19:22 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix charmap_decode with non-BMP chars as integers (cpython issue15379) Message-ID: <20140302081922.D0BFC1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69602:87cf171379cb Date: 2014-03-02 03:18 -0500 http://bitbucket.org/pypy/pypy/changeset/87cf171379cb/ Log: fix charmap_decode with non-BMP chars as integers (cpython issue15379) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -525,33 +525,22 @@ raise return errorchar - # Charmap may return a unicode string - try: - x = space.unicode_w(w_ch) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - return x - - # Charmap may return a number - try: + if space.isinstance_w(w_ch, space.w_unicode): + # Charmap may return a unicode string + return space.unicode_w(w_ch) + elif space.isinstance_w(w_ch, space.w_int): + # Charmap may return a number x = space.int_w(w_ch) - except OperationError: - if not e.match(space, space.w_TypeError): - raise - else: - if 0 <= x < 65536: # Even on wide unicode builds... 
- return unichr(x) - else: - raise OperationError(space.w_TypeError, space.wrap( - "character mapping must be in range(65536)")) - - # Charmap may return None - if space.is_w(w_ch, space.w_None): + if not 0 <= x <= 0x10FFFF: + raise oefmt(space.w_TypeError, + "character mapping must be in range(0x110000)") + return unichr(x) + elif space.is_w(w_ch, space.w_None): + # Charmap may return None return errorchar - raise OperationError(space.w_TypeError, space.wrap("invalid mapping")) + raise oefmt(space.w_TypeError, + "character mapping must return integer, None or unicode") class Charmap_Encode: def __init__(self, space, w_mapping): diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -108,7 +108,16 @@ map = tuple([unichr(i) for i in range(256)]) assert charmap_decode('xxx\xff', 'strict', map) == (u'xxx\xff', 4) - raises(TypeError, charmap_decode, '\xff', "replace", {0xff: 0x10001}) + exc = raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 'a'}) + assert exc.value[0] == "character mapping must return integer, None or unicode" + raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 0x110000}) + assert (charmap_decode("\x00\x01\x02", "strict", + {0: 0x10FFFF, 1: ord('b'), 2: ord('c')}) == + u"\U0010FFFFbc", 3) + assert (charmap_decode("\x00\x01\x02", "strict", + {0: u'\U0010FFFF', 1: u'b', 2: u'c'}) == + u"\U0010FFFFbc", 3) + def test_unicode_escape(self): from _codecs import unicode_escape_encode, unicode_escape_decode @@ -116,6 +125,7 @@ assert unicode_escape_decode('abc') == (u'abc'.decode('unicode_escape'), 3) assert unicode_escape_decode('\\x61\\x62\\x63') == (u'abc', 12) + class AppTestPartialEvaluation: spaceconfig = dict(usemodules=('array',)) From noreply at buildbot.pypy.org Sun Mar 2 09:26:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 09:26:59 +0100 (CET) Subject: [pypy-commit] stmgc default: Unify the two hacks to check that memory is correctly zeroed. Message-ID: <20140302082659.384771C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r917:2f1cd052bf40 Date: 2014-03-02 09:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/2f1cd052bf40/ Log: Unify the two hacks to check that memory is correctly zeroed. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -326,16 +326,10 @@ major collection, is cleared in sweep_large_objects() for large objects, but is not cleared for small objects. Clear it now. 
*/ - object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); + object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); uintptr_t lock2_idx = mark_loc(loc2 - 1) + 1; -#ifdef STM_TESTS - long _i; - for (_i=0; _i 5000) n = 5000; +# endif + n /= 8; + for (i = 0; i < n; i++) + assert(((uint64_t *)s)[i] == 0); +#endif +} + static void check_nursery_at_transaction_start(void) { -#ifndef NDEBUG assert((uintptr_t)STM_SEGMENT->nursery_current == _stm_nursery_start); - uintptr_t i, limit; -# ifdef STM_TESTS - limit = NURSERY_END - _stm_nursery_start; -# else - limit = 64; -# endif - for (i = 0; i < limit; i += 8) { - assert(*(TLPREFIX uint64_t *)(STM_SEGMENT->nursery_current + i) == 0); - _duck(); - } -#endif + assert_memset_zero(REAL_ADDRESS(STM_SEGMENT->segment_base, + STM_SEGMENT->nursery_current), + NURSERY_END - _stm_nursery_start); } static void major_do_minor_collections(void) diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -17,3 +17,5 @@ static inline bool must_abort(void) { return STM_SEGMENT->nursery_end == NSE_SIGABORT; } + +static void assert_memset_zero(void *s, size_t n); From noreply at buildbot.pypy.org Sun Mar 2 09:55:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 09:55:35 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: improve type checking in charmap_encode also Message-ID: <20140302085535.436721C0865@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69603:76668a98dac6 Date: 2014-03-02 03:31 -0500 http://bitbucket.org/pypy/pypy/changeset/76668a98dac6/ Log: improve type checking in charmap_encode also diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -558,33 +558,22 @@ raise return errorchar - # Charmap may return a string - try: - x = space.realstr_w(w_ch) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - return x - - # Charmap may return a number - try: + if space.isinstance_w(w_ch, space.w_str): + # Charmap may return a string + return space.str_w(w_ch) + elif space.isinstance_w(w_ch, space.w_int): + # Charmap may return a number x = space.int_w(w_ch) - except OperationError: - if not e.match(space, space.w_TypeError): - raise - else: - if 0 <= x < 256: - return chr(x) - else: - raise OperationError(space.w_TypeError, space.wrap( - "character mapping must be in range(256)")) - - # Charmap may return None - if space.is_w(w_ch, space.w_None): + if not 0 <= x < 256: + raise oefmt(space.w_TypeError, + "character mapping must be in range(256)") + return chr(x) + elif space.is_w(w_ch, space.w_None): + # Charmap may return None return errorchar - raise OperationError(space.w_TypeError, space.wrap("invalid mapping")) + raise oefmt(space.w_TypeError, + "character mapping must return integer, None or str") @unwrap_spec(string=str, errors='str_or_None') diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -572,7 +572,10 @@ assert 'xxx'.encode('charmap') == 'xxx' import codecs - raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: 300}) + exc = raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: 300}) + assert exc.value[0] == 'character mapping must be in range(256)' + exc = raises(TypeError, 
codecs.charmap_encode, u'\xff', "replace", {0xff: u'a'}) + assert exc.value[0] == 'character mapping must return integer, None or str' raises(UnicodeError, codecs.charmap_encode, u"\xff", "replace", {0xff: None}) def test_charmap_encode_replace(self): From noreply at buildbot.pypy.org Sun Mar 2 09:55:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 09:55:36 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix escape_decode to support replace/ignore modes Message-ID: <20140302085536.858561C0865@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69604:8767512bf42b Date: 2014-03-02 03:25 -0500 http://bitbucket.org/pypy/pypy/changeset/8767512bf42b/ Log: fix escape_decode to support replace/ignore modes diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -1,7 +1,8 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter import unicodehelper from rpython.rlib.rstring import StringBuilder + def parsestr(space, encoding, s, unicode_literal=False): """Parses a string or unicode literal, and return a wrapped value. @@ -79,7 +80,7 @@ enc = None if need_encoding: enc = encoding - v = PyString_DecodeEscape(space, substr, enc) + v = PyString_DecodeEscape(space, substr, 'strict', enc) return space.wrap(v) def hexbyte(val): @@ -121,7 +122,7 @@ ps += 1 return ''.join(lis) -def PyString_DecodeEscape(space, s, recode_encoding): +def PyString_DecodeEscape(space, s, errors, recode_encoding): """ Unescape a backslash-escaped string. If recode_encoding is non-zero, the string is UTF-8 encoded and should be re-encoded in the @@ -190,9 +191,17 @@ builder.append(chr(num)) ps += 2 else: - raise_app_valueerror(space, 'invalid \\x escape') - # ignored replace and ignore for now - + if errors == 'strict': + raise_app_valueerror(space, 'invalid \\x escape') + elif errors == 'replace': + builder.append('?') + elif errors == 'ignore': + pass + else: + raise oefmt(space.w_ValueError, "decoding error; " + "unknown error handling code: %s", errors) + if ps+1 <= end and isxdigit(s[ps]): + ps += 1 else: # this was not an escape, so the backslash # has to be added, and we start over in diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -695,5 +695,5 @@ @unwrap_spec(data=str, errors='str_or_None') def escape_decode(space, data, errors='strict'): from pypy.interpreter.pyparser.parsestring import PyString_DecodeEscape - result = PyString_DecodeEscape(space, data, None) + result = PyString_DecodeEscape(space, data, errors, None) return space.newtuple([space.wrap(result), space.wrap(len(data))]) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -118,6 +118,16 @@ {0: u'\U0010FFFF', 1: u'b', 2: u'c'}) == u"\U0010FFFFbc", 3) + def test_escape_decode_errors(self): + from _codecs import escape_decode as decode + raises(ValueError, decode, br"\x") + raises(ValueError, decode, br"[\x]") + assert decode(br"[\x]\x", "ignore") == (b"[]", 6) + assert decode(br"[\x]\x", "replace") == (b"[?]?", 6) + raises(ValueError, decode, br"\x0") + raises(ValueError, decode, br"[\x0]") + assert decode(br"[\x0]\x0", 
"ignore") == (b"[]", 8) + assert decode(br"[\x0]\x0", "replace") == (b"[?]?", 8) def test_unicode_escape(self): from _codecs import unicode_escape_encode, unicode_escape_decode From noreply at buildbot.pypy.org Sun Mar 2 10:13:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 10:13:57 +0100 (CET) Subject: [pypy-commit] pypy default: Have a limited subset of "__pytrace__=1" work: dumping bytecodes as they Message-ID: <20140302091357.40A841C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69605:bc27ffd19a3c Date: 2014-03-02 10:13 +0100 http://bitbucket.org/pypy/pypy/changeset/bc27ffd19a3c/ Log: Have a limited subset of "__pytrace__=1" work: dumping bytecodes as they are executed. Sorry, not tested (how..?) diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -315,27 +315,27 @@ .. _`trace example`: -Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +Tracing bytecodes ++++++++++++++++++ -You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +You can use a simple tracing mode to monitor the interpretation of +bytecodes. To enable it, set ``__pytrace__ = 1`` on the interactive +PyPy console:: >>>> __pytrace__ = 1 Tracing enabled - >>>> a = 1 + 2 - |- <<<< enter a = 1 + 2 @ 1 >>>> - |- 0 LOAD_CONST 0 (W_IntObject(1)) - |- 3 LOAD_CONST 1 (W_IntObject(2)) - |- 6 BINARY_ADD - |- add(W_IntObject(1), W_IntObject(2)) -> W_IntObject(3) - |- 7 STORE_NAME 0 (a) - |- hash(W_StringObject('a')) -> W_IntObject(-468864544) - |- int_w(W_IntObject(-468864544)) -> -468864544 - |-10 LOAD_CONST 2 () - |-13 RETURN_VALUE - |- <<<< leave a = 1 + 2 @ 1 >>>> + >>>> x = 5 + : LOAD_CONST 0 (5) + : STORE_NAME 0 (x) + : LOAD_CONST 1 (None) + : RETURN_VALUE 0 + >>>> x + : LOAD_NAME 0 (x) + : PRINT_EXPR 0 + 5 + : LOAD_CONST 0 (None) + : RETURN_VALUE 0 + >>>> Demos ------- diff --git a/pypy/interpreter/interactive.py b/pypy/interpreter/interactive.py --- a/pypy/interpreter/interactive.py +++ b/pypy/interpreter/interactive.py @@ -189,8 +189,7 @@ try: code.exec_code(self.space, self.w_globals, self.w_globals) finally: - if self.tracelevel: - self.space.unsettrace() + self.unsettrace() self.checktrace() # run doit() in an exception-catching box @@ -203,7 +202,38 @@ def settrace(self): if self.tracelevel: - self.space.settrace() + ec = self.space.getexecutioncontext() + if not hasattr(self, '_orig_bytecode_only_trace'): + self._orig_bytecode_only_trace = ec.bytecode_only_trace + ec.bytecode_only_trace = self._do_bytecode_only_trace + + def unsettrace(self): + if self.tracelevel: + ec = self.space.getexecutioncontext() + ec.bytecode_only_trace = self._orig_bytecode_only_trace + + def _do_bytecode_only_trace(self, frame): + from pypy.tool.pydis import Bytecode, HAVE_ARGUMENT + + if frame.hide(): + return + + self.unsettrace() + next_instr = frame.last_instr + opcode = ord(frame.pycode.co_code[next_instr]) + + oparg = 0 + if opcode >= HAVE_ARGUMENT: + lo = ord(frame.pycode.co_code[next_instr+1]) + hi = ord(frame.pycode.co_code[next_instr+2]) + oparg = (hi * 256) | lo + + class fake: + code = frame.pycode + bytecode = Bytecode(fake, next_instr, oparg, 0) + print '\t%-19s %s' % (str(frame.pycode.co_name) + ':', + bytecode.repr_with_space(self.space)) + self.settrace() def checktrace(self): s = self.space @@ 
-213,11 +243,11 @@ s.wrap("__pytrace__"))) if self.tracelevel > 0 and tracelevel == 0: - s.reset_trace() + self.unsettrace() print "Tracing disabled" if self.tracelevel == 0 and tracelevel > 0: - self.space.unsettrace() + self.unsettrace() print "Tracing enabled" self.tracelevel = tracelevel From noreply at buildbot.pypy.org Sun Mar 2 10:25:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 10:25:46 +0100 (CET) Subject: [pypy-commit] pypy default: A test for bc27ffd19a3c Message-ID: <20140302092546.C53051C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69606:931f9189e510 Date: 2014-03-02 10:25 +0100 http://bitbucket.org/pypy/pypy/changeset/931f9189e510/ Log: A test for bc27ffd19a3c diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -7,10 +7,13 @@ pypypath = py.path.local(pypy.__file__).dirpath("bin", "pyinteractive.py") -def run(*args): +def run(*args, **kwds): + stdin = kwds.pop('stdin', '') + assert not kwds argslist = map(str, args) - popen = subprocess.Popen(argslist, stdout=subprocess.PIPE) - stdout, stderr = popen.communicate() + popen = subprocess.Popen(argslist, stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + stdout, stderr = popen.communicate(stdin) return stdout @@ -99,3 +102,19 @@ stderr=subprocess.PIPE) _, stderr = popen.communicate() assert stderr.endswith('KeyError: \n') + + +def test_pytrace(): + output = run(sys.executable, pypypath, '-S', + stdin="__pytrace__ = 1\nx = 5\nx") + assert ('\t: LOAD_CONST 0 (5)\n' + '\t: STORE_NAME 0 (x)\n' + '\t: LOAD_CONST 1 (None)\n' + '\t: RETURN_VALUE 0 \n' + '>>>> ') in output + assert ('\t: LOAD_NAME 0 (x)\n' + '\t: PRINT_EXPR 0 \n' + # '5\n' --- this line sent to stderr + '\t: LOAD_CONST 0 (None)\n' + '\t: RETURN_VALUE 0 \n' + '>>>> ') in output From noreply at buildbot.pypy.org Sun Mar 2 10:26:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 10:26:02 +0100 (CET) Subject: [pypy-commit] stmgc default: Done major GC Message-ID: <20140302092602.D49551C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r918:e75c4da5871c Date: 2014-03-02 10:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/e75c4da5871c/ Log: Done major GC diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -1,5 +1,3 @@ - -- major GC - use small uniform gcpages From noreply at buildbot.pypy.org Sun Mar 2 10:48:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 10:48:45 +0100 (CET) Subject: [pypy-commit] stmgc default: Remove outdated comments. Kill the call to Message-ID: <20140302094845.150FE1C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r919:afa747f195c1 Date: 2014-03-02 10:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/afa747f195c1/ Log: Remove outdated comments. Kill the call to reset_transaction_read_version_prebuilt(). diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -119,17 +119,6 @@ { /* force-reset all read markers to 0 */ - /* XXX measure the time taken by this madvise() and the following - zeroing of pages done lazily by the kernel; compare it with using - 16-bit read_versions. - */ - /* XXX try to use madvise() on smaller ranges of memory. In my - measures, we could gain a factor 2 --- not really more, even if - the range of virtual addresses below is very large, as long as it - is already mostly non-reserved pages. 
(The following call keeps - them non-reserved; apparently the kernel just skips them very - quickly.) - */ char *readmarkers = REAL_ADDRESS(STM_SEGMENT->segment_base, FIRST_READMARKER_PAGE * 4096UL); dprintf(("reset_transaction_read_version: %p %ld\n", readmarkers, @@ -143,7 +132,6 @@ #endif memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); } - reset_transaction_read_version_prebuilt(); STM_SEGMENT->transaction_read_version = 1; } From noreply at buildbot.pypy.org Sun Mar 2 10:48:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 10:48:46 +0100 (CET) Subject: [pypy-commit] stmgc default: Add a missing thing about the major gc Message-ID: <20140302094846.161661C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r920:0fb6c73d7b5a Date: 2014-03-02 10:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/0fb6c73d7b5a/ Log: Add a missing thing about the major gc diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -403,6 +403,9 @@ clean_write_locks(); major_set_write_locks(); + /* XXX should re-share pages if possible; and each re-sharing + decreases total_allocated by 4096 */ + dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); dprintf((" `----------------------------------------------\n")); From noreply at buildbot.pypy.org Sun Mar 2 11:07:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 11:07:50 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix unicode_decode_escape behavior Message-ID: <20140302100750.BB8F11C1041@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69607:a3e0260e2eea Date: 2014-03-02 05:06 -0500 http://bitbucket.org/pypy/pypy/changeset/a3e0260e2eea/ Log: fix unicode_decode_escape behavior diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -315,6 +315,28 @@ raises(ValueError, br"\x0".decode, 'string_escape') raises(ValueError, br"[\x0]".decode, 'string_escape') + def test_unicode_escape_decode_errors(self): + from _codecs import unicode_escape_decode, raw_unicode_escape_decode + for decode in [unicode_escape_decode, raw_unicode_escape_decode]: + for c, d in ('u', 4), ('U', 4): + for i in range(d): + raises(UnicodeDecodeError, decode, + "\\" + c + "0"*i) + raises(UnicodeDecodeError, decode, + "[\\" + c + "0"*i + "]") + data = "[\\" + c + "0"*i + "]\\" + c + "0"*i + assert decode(data, "ignore") == (u"[]", len(data)) + assert decode(data, "replace") == (u"[\ufffd]\ufffd", len(data)) + raises(UnicodeDecodeError, decode, r"\U00110000") + assert decode(r"\U00110000", "ignore") == (u"", 10) + assert decode(r"\U00110000", "replace") == (u"\ufffd", 10) + exc = raises(UnicodeDecodeError, unicode_escape_decode, "\u1z32z3", 'strict') + assert str(exc.value) == "'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\u1z32z3", 'strict') + assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\U1z32z3", 'strict') + assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + def test_escape_encode(self): assert '"'.encode('string_escape') == '"' assert "'".encode('string_escape') == "\\'" diff --git a/rpython/rlib/runicode.py 
b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1126,9 +1126,11 @@ encoding, errorhandler, message, errors): chr = 0 if pos + digits > len(s): - message = "end of string in escape sequence" - res, pos = errorhandler(errors, "unicodeescape", - message, s, pos-2, len(s)) + endinpos = pos + while endinpos < len(s) and s[endinpos] in hexdigits: + endinpos += 1 + res, pos = errorhandler(errors, encoding, + message, s, pos-2, endinpos) builder.append(res) else: try: @@ -1138,7 +1140,7 @@ while s[endinpos] in hexdigits: endinpos += 1 res, pos = errorhandler(errors, encoding, - message, s, pos-2, endinpos+1) + message, s, pos-2, endinpos) builder.append(res) else: # when we get here, chr is a 32-bit unicode character @@ -1443,12 +1445,8 @@ pos += 1 continue - if s[pos] == 'u': - digits = 4 - message = "truncated \\uXXXX escape" - else: - digits = 8 - message = "truncated \\UXXXXXXXX escape" + digits = 4 if s[pos] == 'u' else 8 + message = "truncated \\uXXXX" pos += 1 pos = hexescape(result, s, pos, digits, "rawunicodeescape", errorhandler, message, errors) From noreply at buildbot.pypy.org Sun Mar 2 11:32:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 11:32:33 +0100 (CET) Subject: [pypy-commit] stmgc default: Coment out this function here too. Message-ID: <20140302103233.CB7101C315E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r921:1974a2cff585 Date: 2014-03-02 11:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/1974a2cff585/ Log: Coment out this function here too. diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -73,8 +73,10 @@ memcpy(REAL_ADDRESS(segment_base, utarget), source, size); } +#if 0 static void reset_transaction_read_version_prebuilt(void) { memset(REAL_ADDRESS(STM_SEGMENT->segment_base, prebuilt_readmarkers_start), 0, prebuilt_readmarkers_end - prebuilt_readmarkers_start); } +#endif From noreply at buildbot.pypy.org Sun Mar 2 11:41:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 11:41:31 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: add codeccallback test that passes after last fix Message-ID: <20140302104131.CBFFE1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69608:6f8dc12b87a0 Date: 2014-03-02 05:12 -0500 http://bitbucket.org/pypy/pypy/changeset/6f8dc12b87a0/ Log: add codeccallback test that passes after last fix diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -589,6 +589,16 @@ else: assert res == u"\x00\x00\x01\x00\x00" # UCS2 build + def handler1(exc): + if not isinstance(exc, UnicodeEncodeError) \ + and not isinstance(exc, UnicodeDecodeError): + raise TypeError("don't know how to handle %r" % exc) + l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)] + return (u"[%s]" % u"".join(l), exc.end) + codecs.register_error("test.handler1", handler1) + assert "\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ + u"\u3042[<92><117><51>]xxx" + def test_encode_error_bad_handler(self): import codecs codecs.register_error("test.bad_handler", lambda e: (repl, 1)) From noreply at buildbot.pypy.org Sun Mar 2 11:41:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 11:41:32 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: another string formatting overflow fix Message-ID: 
<20140302104132.E10DD1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69609:acad125f3839 Date: 2014-03-02 05:40 -0500 http://bitbucket.org/pypy/pypy/changeset/acad125f3839/ Log: another string formatting overflow fix diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -218,7 +218,7 @@ self.peel_flags() - self.width = self.peel_num('width', sys.maxint) + self.width = self.peel_num('width', self.space.int_w, sys.maxint) if self.width < 0: # this can happen: '%*s' % (-5, "hi") self.f_ljust = True @@ -226,7 +226,7 @@ if self.peekchr() == '.': self.forward() - self.prec = self.peel_num('prec', INT_MAX) + self.prec = self.peel_num('prec', self.space.c_int_w, INT_MAX) if self.prec < 0: self.prec = 0 # this can happen: '%.*f' % (-5, 3) else: @@ -264,13 +264,13 @@ # Same as getmappingkey @jit.unroll_safe - def peel_num(self, name, maxval): + def peel_num(self, name, conv_w, maxval): space = self.space c = self.peekchr() if c == '*': self.forward() w_value = self.nextinputvalue() - return space.int_w(maybe_int(space, w_value)) + return conv_w(w_value) result = 0 while True: digit = ord(c) - ord('0') diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -205,9 +205,11 @@ assert "%x" % IntFails() == '0' def test_formatting_huge_precision(self): - format_string = "%.{}f".format(2**31) + prec = 2**31 + format_string = "%.{}f".format(prec) exc = raises(ValueError, "format_string % 2.34") assert exc.value[0] == 'prec too big' + raises(OverflowError, lambda: u'%.*f' % (prec, 1. / 7)) def test_formatting_huge_width(self): import sys @@ -336,9 +338,11 @@ raises(ValueError, 'u"%\u1234" % (f,)') def test_formatting_huge_precision(self): - format_string = u"%.{}f".format(2**31) + prec = 2**31 + format_string = u"%.{}f".format(prec) exc = raises(ValueError, "format_string % 2.34") assert exc.value[0] == 'prec too big' + raises(OverflowError, lambda: u'%.*f' % (prec, 1. 
/ 7)) def test_formatting_huge_width(self): import sys From noreply at buildbot.pypy.org Sun Mar 2 12:04:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 12:04:01 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix an overflow check in ctypes Message-ID: <20140302110401.574451C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69610:6e0fd2b13e01 Date: 2014-03-02 06:03 -0500 http://bitbucket.org/pypy/pypy/changeset/6e0fd2b13e01/ Log: fix an overflow check in ctypes diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -1,4 +1,3 @@ - import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject @@ -6,6 +5,7 @@ from _ctypes.pointer import _Pointer import inspect + def names_and_fields(self, _fields_, superclass, anonymous_fields=None): # _fields_: list of (name, ctype, [optional_bitfield]) if isinstance(_fields_, tuple): @@ -74,6 +74,7 @@ for name, field in fields.items(): setattr(self, name, field) + class Field(object): def __init__(self, name, offset, size, ctype, num, is_bitfield): self.__dict__['name'] = name @@ -102,7 +103,6 @@ suba = obj._subarray(fieldtype, self.name) return fieldtype._CData_output(suba, obj, offset) - def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) @@ -121,9 +121,6 @@ obj._buffer.__setattr__(self.name, arg) - -# ________________________________________________________________ - def _set_shape(tp, rawfields, is_union=False): tp._ffistruct = _rawffi.Structure(rawfields, is_union, getattr(tp, '_pack_', 0)) @@ -145,8 +142,8 @@ return _CDataMeta.__setattr__(self, name, value) + class StructOrUnionMeta(_CDataMeta): - def __new__(self, name, cls, typedict): res = type.__new__(self, name, cls, typedict) if "_abstract_" in typedict: @@ -154,6 +151,9 @@ cls = cls or (object,) if isinstance(cls[0], StructOrUnionMeta): cls[0]._make_final() + if '_pack_' in typedict: + if not 0 <= typedict['_pack_'] < 2**31: + raise ValueError("_pack_ must be a non-negative integer") if '_fields_' in typedict: if not hasattr(typedict.get('_anonymous_', []), '__iter__'): raise TypeError("Anonymous field must be iterable") @@ -164,7 +164,6 @@ res, typedict['_fields_'], cls[0], typedict.get('_anonymous_', None)) - return res def _make_final(self): @@ -206,7 +205,7 @@ res.__dict__['_base'] = base res.__dict__['_index'] = index return res - + def _CData_retval(self, resbuffer): res = StructOrUnion.__new__(self) res.__dict__['_buffer'] = resbuffer @@ -214,6 +213,7 @@ res.__dict__['_index'] = -1 return res + class StructOrUnion(_CData): __metaclass__ = StructOrUnionMeta @@ -257,5 +257,6 @@ class StructureMeta(StructOrUnionMeta): _is_union = False + class Structure(StructOrUnion): __metaclass__ = StructureMeta From noreply at buildbot.pypy.org Sun Mar 2 12:30:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 12:30:27 +0100 (CET) Subject: [pypy-commit] stmgc default: In-progress: resharing of pages. Doesn't work right now, so is not enabled. Message-ID: <20140302113027.EA32B1C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r922:2e8187050db2 Date: 2014-03-02 12:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/2e8187050db2/ Log: In-progress: resharing of pages. Doesn't work right now, so is not enabled. 
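
Reading aid for the diff that follows: a rough Python-level sketch of the per-page bookkeeping this in-progress patch introduces during major collection. It is illustrative only; the names mark_seen_from(), resolve_after_marking() and segment_flag() are stand-ins invented here rather than stmgc functions (the real ones are mark_flag_page_private() and major_reshare_pages_range() in the diff below), and it assumes the enum order SHARED_PAGE < REMAPPING_PAGE < PRIVATE_PAGE < _SINGLE_SEGMENT_PAGE, which is what the ">=" test in the C code relies on. The re-sharing branch models the code as written; as the log message notes, that part does not actually work yet.

SHARED_PAGE, REMAPPING_PAGE, PRIVATE_PAGE, _SINGLE_SEGMENT_PAGE = range(4)

def segment_flag(segment_num):
    # mirrors write_lock_num = _SINGLE_SEGMENT_PAGE + segment_num in setup.c
    return _SINGLE_SEGMENT_PAGE + segment_num

def mark_seen_from(flags, pagenum, segment_num):
    # while tracing: remember which segment(s) reached this private page
    old = flags[pagenum]
    if old == SHARED_PAGE or old == segment_flag(segment_num):
        return                                       # nothing new to record
    if old == PRIVATE_PAGE:
        flags[pagenum] = segment_flag(segment_num)   # first segment to see it
    else:
        flags[pagenum] = REMAPPING_PAGE              # conflict: two segments saw it

def resolve_after_marking(flags, pagenum):
    # after marking: decide the fate of each formerly privatized page
    if flags[pagenum] == REMAPPING_PAGE:
        flags[pagenum] = PRIVATE_PAGE                # stays private
    elif flags[pagenum] >= PRIVATE_PAGE:
        flags[pagenum] = SHARED_PAGE                 # re-shared: gives back 4096
                                                     # bytes per extra segment

# Example: a private page traced from both segments must stay private.
flags = [PRIVATE_PAGE]
mark_seen_from(flags, 0, 0)
mark_seen_from(flags, 0, 1)
resolve_after_marking(flags, 0)
assert flags[0] == PRIVATE_PAGE
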
diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -50,7 +50,9 @@ uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { - uint8_t other_segment_num = prev_owner - 1; + uint8_t other_segment_num = prev_owner - _SINGLE_SEGMENT_PAGE; + assert(get_priv_segment(other_segment_num)->write_lock_num == + prev_owner); contention_management(other_segment_num); /* The rest of this code is for the case where we continue to diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -189,6 +189,112 @@ } } +/************************************************************/ + + +static inline void mark_single_flag_private(uintptr_t pagenum, uint8_t flagnum) +{ + uint8_t old_flag = flag_page_private[pagenum]; + + if (old_flag == SHARED_PAGE) /* nothing to do, page already shared */ + return; + + if (old_flag == flagnum) /* page already marked for this segment */ + return; + + if (old_flag == PRIVATE_PAGE) { /* a not-seen-before private page */ + flag_page_private[pagenum] = flagnum; + return; + } + + /* else, conflict: the page has been seen from two different segments. + Use REMAPPING_PAGE to mean this situation here. */ + flag_page_private[pagenum] = REMAPPING_PAGE; +} + +static inline void mark_flag_page_private(object_t *obj, uint8_t flag_num, + char *segment_base) +{ + uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + + if (LIKELY((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0)) { + mark_single_flag_private(first_page, flag_num); + } + else { + char *realobj; + size_t obj_size; + uintptr_t end_page; + + /* get the size of the object */ + realobj = REAL_ADDRESS(segment_base, obj); + obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + + /* that's the page *following* the last page with the object */ + end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; + + while (first_page < end_page) + mark_single_flag_private(first_page++, flag_num); + } +} + +static void major_reshare_pages_range(uintptr_t first_page, uintptr_t end_page) +{ + uintptr_t i; + for (i = first_page; i < end_page; i++) { + + uint8_t flag = flag_page_private[i]; + + if (flag == REMAPPING_PAGE) { + /* this page stays private after major collection */ + flag_page_private[i] = PRIVATE_PAGE; + } + else if (flag >= PRIVATE_PAGE) { + /* this page becomes shared again */ + + /* XXX rather slow version here. improve! */ + + abort(); /* doesn't work, actually. we can't keep object data + from segment 1 and largemalloc's chunk data from + segment 0. mess mess mess */ + + char buffer[4096 + 64]; + char *pbuffer = buffer; + pbuffer += ((-(uintptr_t)pbuffer) & 63); /* align */ + + char *ppage0 = get_segment_base(0) + i * 4096; + char *ppage1 = get_segment_base(1) + i * 4096; + + /* do two copies: out of the page seen now as in the seg 0, + and then back into the same location after remapping */ + pagecopy(pbuffer, ppage0); + /* a better approach is possible in which we don't have this */ + madvise(ppage0, 4096, MADV_DONTNEED); + madvise(ppage1, 4096, MADV_DONTNEED); + d_remap_file_pages(ppage0, 4096, i); + d_remap_file_pages(ppage1, 4096, i); + pagecopy(ppage0, pbuffer); + flag_page_private[i] = SHARED_PAGE; + + increment_total_allocated(-4096 * (NB_SEGMENTS-1)); + } + } +} + +static void major_reshare_pages(void) +{ + /* re-share pages if possible. Each re-sharing decreases + total_allocated by 4096. 
*/ + major_reshare_pages_range( + END_NURSERY_PAGE, + (uninitialized_page_start - stm_object_pages) / 4096UL); + major_reshare_pages_range( + (uninitialized_page_stop - stm_object_pages) / 4096UL, + NB_PAGES); +} + +/************************************************************/ + + static inline void mark_record_trace(object_t **pobj) { /* takes a normal pointer to a thread-local pointer to an object */ @@ -198,13 +304,34 @@ return; /* already visited this object */ LIST_APPEND(mark_objects_to_trace, obj); + + /* Note: this obj might be visited already, but from a different + segment. We ignore this case and skip re-visiting the object + anyway. The idea is that such an object is old (not from the + current transaction), otherwise it would not be possible to see + it in two segments; and moreover it is not modified, otherwise + mark_trace() would have been called on two different segments + already. That means that this object is identical in all + segments and only needs visiting once. (It may actually be in a + shared page, or maybe not.) + */ } static void mark_trace(object_t *obj, char *segment_base) { + uint8_t flag_num = + ((struct stm_priv_segment_info_s *) + REAL_ADDRESS(segment_base, STM_PSEGMENT))->write_lock_num; + assert(list_is_empty(mark_objects_to_trace)); while (1) { + + /* first update the flag in flag_page_private[] to correspond + to this segment */ + if (0) mark_flag_page_private(obj, flag_num, segment_base); + + /* trace into the object (the version from 'segment_base') */ struct object_s *realobj = (struct object_s *)REAL_ADDRESS(segment_base, obj); stmcb_trace(realobj, &mark_record_trace); @@ -373,7 +500,7 @@ static void major_collection_now_at_safe_point(void) { dprintf(("\n")); - dprintf((" .----- major_collection_now_at_safe_point -----\n")); + dprintf((" .----- major collection -----------------------\n")); assert(_has_mutex()); /* first, force a minor collection in each of the other segments */ @@ -396,6 +523,7 @@ /* sweeping */ mutex_pages_lock(); + if (0) major_reshare_pages(); sweep_large_objects(); //sweep_uniform_pages(); mutex_pages_unlock(); @@ -403,9 +531,6 @@ clean_write_locks(); major_set_write_locks(); - /* XXX should re-share pages if possible; and each re-sharing - decreases total_allocated by 4096 */ - dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); dprintf((" `----------------------------------------------\n")); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -13,6 +13,10 @@ /* Page is private for each segment. */ PRIVATE_PAGE, + + /* Higher values are used by gcpage.c to mark pages that are privatized + but where so far only one segment was found. 
*/ + _SINGLE_SEGMENT_PAGE }; static uint8_t flag_page_private[NB_PAGES]; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -49,8 +49,8 @@ PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(i + 1 <= 255); - pr->write_lock_num = i + 1; + assert(_SINGLE_SEGMENT_PAGE + i <= 255); + pr->write_lock_num = _SINGLE_SEGMENT_PAGE + i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->objects_pointing_to_nursery = NULL; diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -182,3 +182,28 @@ def test_trace_correct_version_of_overflow_objects_2(self): self.test_trace_correct_version_of_overflow_objects_1(size=5000) + + def test_reshare_if_no_longer_modified_0(self, invert=0): + if invert: + self.switch(1) + self.start_transaction() + x = stm_allocate(5000) + self.push_root(x) + self.commit_transaction() + x = self.pop_root() + # + self.switch(1 - invert) + self.start_transaction() + self.push_root(x) + stm_set_char(x, 'A') + stm_major_collect() + assert lib._stm_total_allocated() == 5000 + LMO + 2 * 4096 # 2 pages + self.commit_transaction() + # + self.start_transaction() + stm_major_collect() + py.test.skip("XXX implement me") + assert lib._stm_total_allocated() == 5000 + LMO # shared again + + def test_reshare_if_no_longer_modified_1(self): + self.test_reshare_if_no_longer_modified_0(invert=1) diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -364,6 +364,7 @@ thread_state.push_roots(ex) ex.do('%s = stm_allocate(%s)' % (r, size)) + ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])))) thread_state.transaction_state.add_root(r, 0, True) thread_state.pop_roots(ex) @@ -375,6 +376,7 @@ r = global_state.get_new_root_name(True, num) thread_state.push_roots(ex) ex.do('%s = stm_allocate_refs(%s)' % (r, num)) + ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])))) thread_state.transaction_state.add_root(r, "ffi.NULL", True) thread_state.pop_roots(ex) From noreply at buildbot.pypy.org Sun Mar 2 13:07:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 13:07:17 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix translation Message-ID: <20140302120717.DF6151C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69611:9268939caf10 Date: 2014-03-02 06:42 -0500 http://bitbucket.org/pypy/pypy/changeset/9268939caf10/ Log: fix translation diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -161,7 +161,6 @@ const = str class StringFormatter(BaseStringFormatter): - def __init__(self, space, fmt, values_w, w_valuedict): BaseStringFormatter.__init__(self, space, values_w, w_valuedict) self.fmt = fmt # either a string or a unicode @@ -218,7 +217,7 @@ self.peel_flags() - self.width = self.peel_num('width', self.space.int_w, sys.maxint) + self.width = self.peel_num('width', sys.maxint) if self.width < 0: # this can happen: '%*s' % (-5, "hi") self.f_ljust = True @@ -226,7 +225,7 @@ if self.peekchr() == '.': self.forward() - self.prec = self.peel_num('prec', self.space.c_int_w, INT_MAX) + self.prec = self.peel_num('prec', INT_MAX) if self.prec < 0: self.prec = 0 # this can happen: '%.*f' % (-5, 3) else: @@ -264,13 +263,18 @@ # Same as getmappingkey @jit.unroll_safe - def peel_num(self, name, conv_w, maxval): + def 
peel_num(self, name, maxval): space = self.space c = self.peekchr() if c == '*': self.forward() w_value = self.nextinputvalue() - return conv_w(w_value) + if name == 'width': + return space.int_w(w_value) + elif name == 'prec': + return space.c_int_w(w_value) + else: + assert False result = 0 while True: digit = ord(c) - ord('0') From noreply at buildbot.pypy.org Sun Mar 2 13:07:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 13:07:19 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: cleanup some duplicated tests Message-ID: <20140302120719.275881C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69612:9bf739c1dbf9 Date: 2014-03-02 06:51 -0500 http://bitbucket.org/pypy/pypy/changeset/9bf739c1dbf9/ Log: cleanup some duplicated tests diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,4 +1,3 @@ - import _rawffi from _rawffi import alt as _ffi import sys diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -512,38 +512,6 @@ assert (y.a, y.b, y.c) == (-1, -7, 0) y.free() - def test_structure_bitfields_longlong(self): - import _rawffi - Z = _rawffi.Structure([('a', 'Q', 1), - ('b', 'Q', 62), - ('c', 'Q', 1)]) - z = Z() - z.a, z.b, z.c = 7, 0x1000000000000001, 7 - assert (z.a, z.b, z.c) == (1, 0x1000000000000001, 1) - z.free() - - def test_structure_ulonglong_bitfields(self): - import _rawffi - X = _rawffi.Structure([('A', 'Q', 1), - ('B', 'Q', 62), - ('C', 'Q', 1)]) - x = X() - x.A, x.B, x.C = 7, 0x1000000000000001, 7 - assert x.A == 1 - assert x.B == 0x1000000000000001 - assert x.C == 1 - x.free() - - def test_structure_longlong_bitfields(self): - import _rawffi - Y = _rawffi.Structure([('a', 'q', 1), - ('b', 'q', 61), - ('c', 'q', 1)]) - y = Y() - y.a, y.b, y.c = 0, -7, 0 - assert (y.a, y.b, y.c) == (0, -7, 0) - y.free() - def test_structure_ulonglong_bitfields(self): import _rawffi X = _rawffi.Structure([('A', 'Q', 1), @@ -742,7 +710,6 @@ finally: sys.stderr = orig - def test_setattr_struct(self): import _rawffi X = _rawffi.Structure([('value1', 'i'), ('value2', 'i')]) @@ -773,7 +740,6 @@ assert s.value == 4 s.free() - def test_array_addressof(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_bitfields.py b/pypy/module/test_lib_pypy/ctypes_tests/test_bitfields.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_bitfields.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_bitfields.py @@ -5,6 +5,11 @@ import ctypes +signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong) +unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong) +int_types = unsigned_int_types + signed_int_types + + def setup_module(mod): import conftest _ctypes_test = str(conftest.sofile) @@ -12,6 +17,7 @@ func.argtypes = POINTER(BITS), c_char mod.func = func + class BITS(Structure): _fields_ = [("A", c_int, 1), ("B", c_int, 2), @@ -32,9 +38,7 @@ ("S", c_short, 7)] - class TestC: - def test_ints(self): for i in range(512): for name in "ABCDEFGHI": @@ -49,12 +53,8 @@ setattr(b, name, i) assert (name, i, getattr(b, name)) == (name, i, func(byref(b), name)) -signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong) -unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong) -int_types = unsigned_int_types + signed_int_types class 
TestBitField: - def test_longlong(self): class X(Structure): _fields_ = [("a", c_longlong, 1), @@ -98,7 +98,6 @@ x.a, x.b = 0, -1 assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, -1, 0) - def test_unsigned(self): for c_typ in unsigned_int_types: class X(Structure): @@ -114,7 +113,6 @@ x.a, x.b = 0, -1 assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, 7, 0) - def fail_fields(self, *fields): return self.get_except(type(Structure), "X", (), {"_fields_": fields}) @@ -194,7 +192,6 @@ assert X.b.offset == sizeof(c_short)*1 assert X.c.offset == sizeof(c_short)*2 - def get_except(self, func, *args, **kw): try: func(*args, **kw) @@ -245,7 +242,6 @@ A._fields_ = [("a", POINTER(A)), ("b", c_ubyte, 4)] - def test_set_fields_cycle_fails(self): class A(Structure): pass diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py b/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py @@ -4,6 +4,7 @@ import py + class TestSubclasses(BaseCTypesTestChecker): def test_subclass(self): class X(Structure): @@ -45,6 +46,7 @@ assert Y._fields_ == [("b", c_int)] assert Z._fields_ == [("a", c_int)] + class TestStructure(BaseCTypesTestChecker): formats = {"c": c_char, "b": c_byte, @@ -163,7 +165,6 @@ # offset is always relative to the class... def test_packed(self): - py.test.skip("custom alignment not supported") class X(Structure): _fields_ = [("a", c_byte), ("b", c_longlong)] @@ -242,7 +243,6 @@ # Try a second time, result may be different (cf. issue1498) pos = POSITION(1, 2) assert (pos.x, pos.y) == (1, 2) - def test_invalid_field_types(self): class POINT(Structure): @@ -460,8 +460,8 @@ class X(Structure): _fields_ = [(u"i", c_int)] + class TestPointerMember(BaseCTypesTestChecker): - def test_1(self): # a Structure with a POINTER field class S(Structure): @@ -515,7 +515,6 @@ else: raise AssertionError, "Structure or union cannot contain itself" - def test_vice_versa(self): py.test.skip("mutually dependent lazily defined structures error semantics") class First(Structure): @@ -563,4 +562,3 @@ x = X() assert x.x == 0 - From noreply at buildbot.pypy.org Sun Mar 2 14:18:32 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Sun, 2 Mar 2014 14:18:32 +0100 (CET) Subject: [pypy-commit] pypy default: try to print the class of the virtual that is being promoted Message-ID: <20140302131832.94F241C0865@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r69613:366c6b15a89b Date: 2014-03-02 14:17 +0100 http://bitbucket.org/pypy/pypy/changeset/366c6b15a89b/ Log: try to print the class of the virtual that is being promoted diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -271,7 +271,13 @@ def optimize_GUARD_VALUE(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): - raise InvalidLoop('A promote of a virtual (a recently allocated object) never makes sense!') + arg = value.get_constant_class(self.optimizer.cpu) + if arg: + addr = arg.getaddr() + name = self.optimizer.metainterp_sd.get_name_from_address(addr) + else: + name = "" + raise InvalidLoop('A promote of a virtual %s (a recently allocated object) never makes sense!' % name) if value.last_guard: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. 
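
As a quick illustration of the rewrite.py hunk just above (plain Python, not PyPy's real optimizer classes; InvalidLoop and promote_of_virtual_error here are minimal stand-ins): when guard_value is applied to a virtual whose constant class is known, the raised InvalidLoop now names that class, which is what the updated test in test_optimizeopt.py checks for with the "node" class.

class InvalidLoop(Exception):
    def __init__(self, msg):
        self.msg = msg

def promote_of_virtual_error(class_name):
    # class_name comes from get_name_from_address(); it is "" when the
    # virtual's constant class is unknown
    return InvalidLoop('A promote of a virtual %s (a recently allocated '
                       'object) never makes sense!' % class_name)

exc = promote_of_virtual_error("node")
assert "node" in exc.msg
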
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py b/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py --- a/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py @@ -35,8 +35,8 @@ def raises(self, e, fn, *args): try: fn(*args) - except e: - pass + except Exception, e: + return e opt = allopts[optnum] exec "TestNo%sLLtype = TestLLtype" % (opt[0].upper() + opt[1:]) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -91,7 +91,7 @@ return loop def raises(self, e, fn, *args): - py.test.raises(e, fn, *args) + return py.test.raises(e, fn, *args).value class OptimizeOptTest(BaseTestWithUnroll): @@ -2824,8 +2824,10 @@ guard_value(p2, ConstPtr(myptr)) [] jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + exc = self.raises(InvalidLoop, self.optimize_loop, + ops, "crash!") + if exc: + assert "node" in exc.msg def test_merge_guard_class_guard_value(self): ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -321,6 +321,13 @@ max_retrace_guards = 15 jitcounter = DeterministicJitCounter() + def get_name_from_address(self, addr): + # hack + try: + return "".join(addr.ptr.name)[:-1] # remove \x00 + except AttributeError: + return "" + class Storage(compile.ResumeGuardDescr): "for tests." 
def __init__(self, metainterp_sd=None, original_greenkey=None): From noreply at buildbot.pypy.org Sun Mar 2 14:25:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 14:25:58 +0100 (CET) Subject: [pypy-commit] stmgc default: Progress, but still disabled because I got one crash in duhton Message-ID: <20140302132558.69AE61C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r923:3d93e48f1cce Date: 2014-03-02 14:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/3d93e48f1cce/ Log: Progress, but still disabled because I got one crash in duhton diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -50,7 +50,7 @@ uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { - uint8_t other_segment_num = prev_owner - _SINGLE_SEGMENT_PAGE; + uint8_t other_segment_num = prev_owner - 1; assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); contention_management(other_segment_num); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -192,33 +192,25 @@ /************************************************************/ -static inline void mark_single_flag_private(uintptr_t pagenum, uint8_t flagnum) +static inline void mark_single_flag_private(uintptr_t pagenum) { - uint8_t old_flag = flag_page_private[pagenum]; - - if (old_flag == SHARED_PAGE) /* nothing to do, page already shared */ - return; - - if (old_flag == flagnum) /* page already marked for this segment */ - return; - - if (old_flag == PRIVATE_PAGE) { /* a not-seen-before private page */ - flag_page_private[pagenum] = flagnum; - return; + if (flag_page_private[pagenum] == PRIVATE_PAGE) { + assert(pagenum >= END_NURSERY_PAGE); + assert(pagenum < NB_PAGES); + flag_page_private[pagenum] = SEGMENT1_PAGE; } - - /* else, conflict: the page has been seen from two different segments. - Use REMAPPING_PAGE to mean this situation here. */ - flag_page_private[pagenum] = REMAPPING_PAGE; + else { + assert(flag_page_private[pagenum] == SHARED_PAGE || + flag_page_private[pagenum] == SEGMENT1_PAGE); + } } -static inline void mark_flag_page_private(object_t *obj, uint8_t flag_num, - char *segment_base) +static inline void mark_flag_page_private(object_t *obj, char *segment_base) { uintptr_t first_page = ((uintptr_t)obj) / 4096UL; if (LIKELY((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0)) { - mark_single_flag_private(first_page, flag_num); + mark_single_flag_private(first_page); } else { char *realobj; @@ -233,49 +225,73 @@ end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; while (first_page < end_page) - mark_single_flag_private(first_page++, flag_num); + mark_single_flag_private(first_page++); } } static void major_reshare_pages_range(uintptr_t first_page, uintptr_t end_page) { + return; /* XXX DISABLED */ + uintptr_t i; for (i = first_page; i < end_page; i++) { - uint8_t flag = flag_page_private[i]; + switch (flag_page_private[i]) { - if (flag == REMAPPING_PAGE) { + case SEGMENT1_PAGE: /* this page stays private after major collection */ flag_page_private[i] = PRIVATE_PAGE; - } - else if (flag >= PRIVATE_PAGE) { - /* this page becomes shared again */ + break; - /* XXX rather slow version here. improve! */ + case PRIVATE_PAGE:; + /* this page becomes shared again. No object in it was + traced belonging to a segment other than 0. - abort(); /* doesn't work, actually. 
we can't keep object data - from segment 1 and largemalloc's chunk data from - segment 0. mess mess mess */ - - char buffer[4096 + 64]; - char *pbuffer = buffer; - pbuffer += ((-(uintptr_t)pbuffer) & 63); /* align */ - + XXX This is maybe a too-strict condition, but the more + general condition "all traced objects belong to the same + segment" has problems with large objects in segments > 0. + More precisely: we'd need to keep in the shared page the + content of the objects (from segment > 0), but also the + largemalloc's chunk data (stored in segment 0). + */ +#if NB_SEGMENTS != 2 +# error "limited to NB_SEGMENTS == 2" +#endif char *ppage0 = get_segment_base(0) + i * 4096; char *ppage1 = get_segment_base(1) + i * 4096; - /* do two copies: out of the page seen now as in the seg 0, - and then back into the same location after remapping */ - pagecopy(pbuffer, ppage0); - /* a better approach is possible in which we don't have this */ - madvise(ppage0, 4096, MADV_DONTNEED); - madvise(ppage1, 4096, MADV_DONTNEED); - d_remap_file_pages(ppage0, 4096, i); + /* two cases... either the mapping is (0->0, 1->1) or (0->1, + 1->0). Distinguish which case it is by hacking a lot */ + + // 0->0,1->1 or 0->1,1->0 d_remap_file_pages(ppage1, 4096, i); - pagecopy(ppage0, pbuffer); + // 0->0,1->0 or 0->1,1->0 + + char oldvalue0 = *ppage0; + char oldvalue1 = *ppage1; + asm("":::"memory"); + *ppage0 = 1 + oldvalue1; + asm("":::"memory"); + char newvalue1 = *ppage1; + asm("":::"memory"); + *ppage0 = oldvalue0; + if (oldvalue1 == newvalue1) { + // 0->1,1->0 + pagecopy(ppage1, ppage0); // copy from page0 to page1, + // i.e. from the underlying memory seg1 to seg0 + d_remap_file_pages(ppage0, 4096, i); + // 0->0,1->0 + } flag_page_private[i] = SHARED_PAGE; increment_total_allocated(-4096 * (NB_SEGMENTS-1)); + break; + + case SHARED_PAGE: + break; /* stay shared */ + + default: + assert(!"unexpected flag_page_private"); } } } @@ -285,7 +301,7 @@ /* re-share pages if possible. Each re-sharing decreases total_allocated by 4096. */ major_reshare_pages_range( - END_NURSERY_PAGE, + END_NURSERY_PAGE, /* not the nursery! */ (uninitialized_page_start - stm_object_pages) / 4096UL); major_reshare_pages_range( (uninitialized_page_stop - stm_object_pages) / 4096UL, @@ -319,17 +335,15 @@ static void mark_trace(object_t *obj, char *segment_base) { - uint8_t flag_num = - ((struct stm_priv_segment_info_s *) - REAL_ADDRESS(segment_base, STM_PSEGMENT))->write_lock_num; - assert(list_is_empty(mark_objects_to_trace)); while (1) { - /* first update the flag in flag_page_private[] to correspond - to this segment */ - if (0) mark_flag_page_private(obj, flag_num, segment_base); + /* first, if we're not seeing segment 0, we must change the + flags in flag_page_private[] from PRIVATE_PAGE to + REMAPPING_PAGE, which will mean "can't re-share" */ + if (segment_base != stm_object_pages && 0 /* XXX DISABLED */) + mark_flag_page_private(obj, segment_base); /* trace into the object (the version from 'segment_base') */ struct object_s *realobj = @@ -352,30 +366,42 @@ static void mark_visit_from_roots(void) { - stm_thread_local_t *tl = stm_all_thread_locals; - do { - /* If 'tl' is currently running, its 'associated_segment_num' - field is the segment number that contains the correct - version of its overflowed objects. If not, then the - field is still some correct segment number, and it doesn't - matter which one we pick. 
*/ - char *segment_base = get_segment_base(tl->associated_segment_num); - - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; - while (current-- != base) { - assert(*current != (object_t *)-1); - mark_visit_object(*current, segment_base); - } - mark_visit_object(tl->thread_local_obj, segment_base); - - tl = tl->next; - } while (tl != stm_all_thread_locals); if (testing_prebuilt_objs != NULL) { LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, mark_visit_object(item, get_segment_base(0))); } + + /* Do the following twice, so that we trace first the objects from + segment 0, and then all others. XXX This is a hack to make it + more likely that we'll be able to re-share pages. */ + + int must_be_zero; + for (must_be_zero = 1; must_be_zero >= 0; must_be_zero--) { + + stm_thread_local_t *tl = stm_all_thread_locals; + do { + /* If 'tl' is currently running, its 'associated_segment_num' + field is the segment number that contains the correct + version of its overflowed objects. If not, then the + field is still some correct segment number, and it doesn't + matter which one we pick. */ + char *segment_base = get_segment_base(tl->associated_segment_num); + + if (must_be_zero == (segment_base == get_segment_base(0))) { + + object_t **current = tl->shadowstack; + object_t **base = tl->shadowstack_base; + while (current-- != base) { + assert(*current != (object_t *)-1); + mark_visit_object(*current, segment_base); + } + mark_visit_object(tl->thread_local_obj, segment_base); + } + + tl = tl->next; + } while (tl != stm_all_thread_locals); + } } static void mark_visit_from_modified_objects(void) @@ -523,7 +549,7 @@ /* sweeping */ mutex_pages_lock(); - if (0) major_reshare_pages(); + major_reshare_pages(); sweep_large_objects(); //sweep_uniform_pages(); mutex_pages_unlock(); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -14,9 +14,9 @@ /* Page is private for each segment. */ PRIVATE_PAGE, - /* Higher values are used by gcpage.c to mark pages that are privatized - but where so far only one segment was found. */ - _SINGLE_SEGMENT_PAGE + /* gcpage.c: page contains objects that have been traced in the + segment > 0 */ + SEGMENT1_PAGE, }; static uint8_t flag_page_private[NB_PAGES]; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -49,8 +49,8 @@ PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(_SINGLE_SEGMENT_PAGE + i <= 255); - pr->write_lock_num = _SINGLE_SEGMENT_PAGE + i; + assert(i + 1 <= 255); + pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->objects_pointing_to_nursery = NULL; From noreply at buildbot.pypy.org Sun Mar 2 14:25:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 14:25:59 +0100 (CET) Subject: [pypy-commit] stmgc default: I could get a crash only once, and not in gdb, so I've no clue if it's related to this or not at all --- likely not. Message-ID: <20140302132559.68EEA1C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r924:688adfa95b50 Date: 2014-03-02 14:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/688adfa95b50/ Log: I could get a crash only once, and not in gdb, so I've no clue if it's related to this or not at all --- likely not. 
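For context, the trick used in major_reshare_pages_range() above -- deciding whether two segment views currently alias the same underlying page by writing a changed byte through one view, reading it back through the other, and then restoring it -- can be sketched in plain Python with mmap. This is illustration only, not part of either changeset; it assumes Linux-style shared file mappings and 4096-byte pages, and all names are invented.

    import mmap, os, tempfile

    fd, path = tempfile.mkstemp()
    os.write(fd, '\x00' * 8192)                 # two pages of backing file

    view_a = mmap.mmap(fd, 4096, offset=0)      # like the "segment 0" view of page i
    view_b = mmap.mmap(fd, 4096, offset=0)      # maps the same underlying page
    view_c = mmap.mmap(fd, 4096, offset=4096)   # maps a different underlying page

    def aliased(v1, v2):
        # write a changed byte through v1, check whether v2 observes it, restore
        old = v1[0]
        v1[0] = chr((ord(old) + 1) & 0xff)
        same = (v2[0] == v1[0])
        v1[0] = old
        return same

    print aliased(view_a, view_b)    # True:  one underlying page
    print aliased(view_a, view_c)    # False: distinct pages

The hunk above uses the same observation to work out which of the two possible mappings it is in after the first d_remap_file_pages() call, and hence whether the page data still has to be copied across before the segment-0 view is remapped.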
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -231,8 +231,6 @@ static void major_reshare_pages_range(uintptr_t first_page, uintptr_t end_page) { - return; /* XXX DISABLED */ - uintptr_t i; for (i = first_page; i < end_page; i++) { @@ -342,7 +340,7 @@ /* first, if we're not seeing segment 0, we must change the flags in flag_page_private[] from PRIVATE_PAGE to REMAPPING_PAGE, which will mean "can't re-share" */ - if (segment_base != stm_object_pages && 0 /* XXX DISABLED */) + if (segment_base != stm_object_pages) mark_flag_page_private(obj, segment_base); /* trace into the object (the version from 'segment_base') */ From noreply at buildbot.pypy.org Sun Mar 2 16:29:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 16:29:48 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix _rawffi bitfield mask calculation Message-ID: <20140302152948.0A41C1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69614:0bee77fda190 Date: 2014-03-02 09:32 -0500 http://bitbucket.org/pypy/pypy/changeset/0bee77fda190/ Log: fix _rawffi bitfield mask calculation diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -1,4 +1,3 @@ - """ Interpreter-level implementation of structure, exposing ll-structure to app-level with apropriate interface """ @@ -17,8 +16,8 @@ from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc -from rpython.rlib.rarithmetic import intmask, signedtype, widen -from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong +from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint + def unpack_fields(space, w_fields): fields_w = space.unpackiterable(w_fields) @@ -269,31 +268,24 @@ return x >> 16 def BIT_MASK(x, ll_t): - if ll_t is lltype.SignedLongLong: - return (r_longlong(1) << x) - 1 - elif ll_t is lltype.UnsignedLongLong: - return (r_ulonglong(1) << x) - 1 - return (1 << x) -1 + return (((1 << (x - 1)) - 1) << 1) + 1 BIT_MASK._annspecialcase_ = 'specialize:arg(1)' def push_field(self, num, value): ptr = rffi.ptradd(self.ll_buffer, self.shape.ll_positions[num]) TP = lltype.typeOf(value) - T = lltype.Ptr(rffi.CArray(TP)) - # Handle bitfields for c in unroll_letters_for_numbers: if LL_TYPEMAP[c] is TP and self.shape.ll_bitsizes: # Modify the current value with the bitfield changed bitsize = self.shape.ll_bitsizes[num] numbits = NUM_BITS(bitsize) - lowbit = LOW_BIT(bitsize) if numbits: + lowbit = LOW_BIT(bitsize) + bitmask = BIT_MASK(numbits, TP) value = widen(value) - bitmask = BIT_MASK(numbits, TP) - # current = widen(read_ptr(ptr, 0, TP)) - current &= ~ (bitmask << lowbit) + current &= ~(bitmask << lowbit) current |= (value & bitmask) << lowbit value = rffi.cast(TP, current) break @@ -302,29 +294,24 @@ def cast_pos(self, i, ll_t): pos = rffi.ptradd(self.ll_buffer, self.shape.ll_positions[i]) - TP = lltype.Ptr(rffi.CArray(ll_t)) value = read_ptr(pos, 0, ll_t) - # Handle bitfields for c in unroll_letters_for_numbers: if LL_TYPEMAP[c] is ll_t and self.shape.ll_bitsizes: bitsize = self.shape.ll_bitsizes[i] numbits = NUM_BITS(bitsize) - lowbit = LOW_BIT(bitsize) if numbits: + lowbit = LOW_BIT(bitsize) + bitmask = BIT_MASK(numbits, ll_t) value = widen(rffi.cast(ll_t, value)) - bitmask = BIT_MASK(numbits, ll_t) - # value >>= lowbit value &= bitmask if ll_t 
is lltype.Bool or signedtype(ll_t._type): sign = (value >> (numbits - 1)) & 1 if sign: - one = r_longlong(1) if ll_t is lltype.SignedLongLong else 1 - value = value - (one << numbits) + value = value - (1 << numbits) value = rffi.cast(ll_t, value) break - return value cast_pos._annspecialcase_ = 'specialize:arg(2)' diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -534,6 +534,15 @@ assert (y.a, y.b, y.c) == (0, -7, 0) y.free() + def test_structure_single_longbit_bitfield(self): + import _rawffi + for s in [('I', 32), ('Q', 64)]: + Y = _rawffi.Structure([('a',) + s]) + y = Y() + y.a = 10 + assert y.a == 10 + y.free() + def test_invalid_bitfields(self): import _rawffi raises(TypeError, _rawffi.Structure, [('A', 'c', 1)]) From noreply at buildbot.pypy.org Sun Mar 2 16:29:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 16:29:49 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: clean up select overflow checking Message-ID: <20140302152949.495E31C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69615:5b6d13a5d362 Date: 2014-03-02 10:29 -0500 http://bitbucket.org/pypy/pypy/changeset/5b6d13a5d362/ Log: clean up select overflow checking diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -7,7 +7,8 @@ from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, compute_unique_id) from rpython.rlib.signature import signature -from rpython.rlib.rarithmetic import r_uint +from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ + INT_MIN, INT_MAX, UINT_MAX from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) @@ -18,8 +19,6 @@ __all__ = ['ObjSpace', 'OperationError', 'W_Root'] -UINT_MAX_32_BITS = r_uint(4294967295) - unpackiterable_driver = jit.JitDriver(name='unpackiterable', greens=['tp'], reds=['items', 'w_iterator']) @@ -1466,7 +1465,7 @@ # Like space.gateway_int_w(), but raises an app-level OverflowError if # the integer does not fit in 32 bits. Here for gateway.py. value = self.gateway_int_w(w_obj) - if value < -2147483647-1 or value > 2147483647: + if value < INT_MIN or value > INT_MAX: raise OperationError(self.w_OverflowError, self.wrap("expected a 32-bit integer")) return value @@ -1475,7 +1474,7 @@ # Like space.gateway_uint_w(), but raises an app-level OverflowError if # the integer does not fit in 32 bits. Here for gateway.py. value = self.uint_w(w_obj) - if value > UINT_MAX_32_BITS: + if value > UINT_MAX: raise OperationError(self.w_OverflowError, self.wrap("expected an unsigned 32-bit integer")) return value @@ -1488,11 +1487,21 @@ if value < 0: raise OperationError(self.w_ValueError, self.wrap("expected a non-negative integer")) - if value > 2147483647: + if value > INT_MAX: raise OperationError(self.w_OverflowError, self.wrap("expected a 32-bit integer")) return value + def c_short_w(self, w_obj): + value = self.int_w(w_obj) + if value < SHRT_MIN: + raise oefmt(self.w_OverflowError, + "signed short integer is less than minimum") + elif value > SHRT_MAX: + raise oefmt(self.w_OverflowError, + "signed short integer is greater than maximum") + return value + def truncatedint_w(self, w_obj, allow_conversion=True): # Like space.gateway_int_w(), but return the integer truncated # instead of raising OverflowError. 
For obscure cases only. diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -147,6 +147,9 @@ def visit_c_nonnegint(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit_c_short(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit_truncatedint_w(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -261,6 +264,9 @@ def visit_c_nonnegint(self, typ): self.run_args.append("space.c_nonnegint_w(%s)" % (self.scopenext(),)) + def visit_c_short(self, typ): + self.run_args.append("space.c_short_w(%s)" % (self.scopenext(),)) + def visit_truncatedint_w(self, typ): self.run_args.append("space.truncatedint_w(%s)" % (self.scopenext(),)) @@ -397,6 +403,9 @@ def visit_c_nonnegint(self, typ): self.unwrap.append("space.c_nonnegint_w(%s)" % (self.nextarg(),)) + def visit_c_short(self, typ): + self.unwrap.append("space.c_short_w(%s)" % (self.nextarg(),)) + def visit_truncatedint_w(self, typ): self.unwrap.append("space.truncatedint_w(%s)" % (self.nextarg(),)) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,7 +1,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, wrap_oserror, oefmt from rpython.rlib import rpoll import errno @@ -20,12 +20,8 @@ def __init__(self): self.fddict = {} - @unwrap_spec(events=int) + @unwrap_spec(events="c_short") def register(self, space, w_fd, events=defaultevents): - if not -32767 - 1 <= events <= 32767: - m = ("signed short integer is " + - "greater than maximum" if events > 0 else "less than minimum") - raise OperationError(space.w_OverflowError, space.wrap(m)) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events @@ -53,15 +49,11 @@ # we want to be compatible with cpython and also accept things # that can be casted to integer (I think) try: - # compute the integer - timeout = space.int_w(space.int(w_timeout)) - except (OverflowError, ValueError): - raise OperationError(space.w_ValueError, - space.wrap("math range error")) - - if not -2147483647 - 1 <= timeout <= 2147483647: - msg = "Python int too large to convert to C int" - raise OperationError(space.w_OverflowError, space.wrap(msg)) + w_timeout = space.int(w_timeout) + except OperationError: + raise oefmt(space.w_TypeError, + "timeout must be an integer or None") + timeout = space.c_int_w(w_timeout) try: retval = rpoll.poll(self.fddict, timeout) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -213,7 +213,7 @@ readend.close() writeend.close() - def test_poll_int_overflow(self): + def test_poll_int_arguments(self): import select pollster = select.poll() @@ -222,12 +222,16 @@ raises(OverflowError, pollster.poll, 1L << 64) pollster = select.poll() - raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 - raises(OverflowError, pollster.register, 0, -32768 - 1) + exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 + assert exc.value[0] == 'signed short integer is greater than maximum' + exc = raises(OverflowError, pollster.register, 0, -32768 - 1) + assert 
exc.value[0] == 'signed short integer is less than minimum' raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 raises(OverflowError, pollster.poll, -2147483648 - 1) raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 + exc = raises(TypeError, pollster.poll, '123') + assert exc.value[0] == 'timeout must be an integer or None' class AppTestSelectWithPipes(_AppTestSelect): diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -75,7 +75,11 @@ # to handle the win64 special case: is_emulated_long = _long_typecode != 'l' +SHRT_MIN = -2**(_get_bitsize('h') - 1) +SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 +INT_MIN = -2**(_get_bitsize('i') - 1) INT_MAX = 2**(_get_bitsize('i') - 1) - 1 +UINT_MAX = 2**_get_bitsize('i') - 1 LONG_BIT = _get_long_bit() LONG_MASK = (2**LONG_BIT)-1 From noreply at buildbot.pypy.org Sun Mar 2 16:44:27 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 2 Mar 2014 16:44:27 +0100 (CET) Subject: [pypy-commit] cffi default: fix for win32 Message-ID: <20140302154427.094821C0865@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r1470:023010b338a2 Date: 2014-03-02 17:33 +0200 http://bitbucket.org/cffi/cffi/changeset/023010b338a2/ Log: fix for win32 diff --git a/testing/test_function.py b/testing/test_function.py --- a/testing/test_function.py +++ b/testing/test_function.py @@ -34,6 +34,12 @@ def getvalue(self): return self._value +lib_m = 'm' +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' class TestFunction(object): Backend = CTypesBackend @@ -43,18 +49,16 @@ ffi.cdef(""" double sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x == math.sin(1.23) def test_sinf(self): - if sys.platform == 'win32': - py.test.skip("no 'sinf'") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sinf(1.23) assert type(x) is float assert x != math.sin(1.23) # rounding effects @@ -66,14 +70,14 @@ ffi.cdef(""" void sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x is None def test_dlopen_filename(self): - path = ctypes.util.find_library("m") + path = ctypes.util.find_library(lib_m) if not path: - py.test.skip("libm not found") + py.test.skip("%s not found" % lib_m) ffi = FFI(backend=self.Backend()) ffi.cdef(""" double cos(double x); @@ -91,7 +95,7 @@ ffi.cdef(""" double cos(double x); """) - m = ffi.dlopen("m", ffi.RTLD_LAZY | ffi.RTLD_LOCAL) + m = ffi.dlopen(lib_m, ffi.RTLD_LAZY | ffi.RTLD_LOCAL) x = m.cos(1.23) assert x == math.cos(1.23) @@ -292,7 +296,7 @@ typedef double func_t(double); func_t sin; """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x == math.sin(1.23) @@ -355,7 +359,7 @@ ffi.cdef(""" int nonexistent(); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) assert not hasattr(m, 'nonexistent') def test_wraps_from_stdlib(self): @@ -369,7 +373,7 @@ def wrapper(*args): return f(*args) + 100 return wrapper - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) sin100 = my_decorator(m.sin) x = sin100(1.23) assert x == math.sin(1.23) + 100 diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -36,7 +36,7 @@ 
totalsize=-1, totalalignment=-1, sflags=0): assert isinstance(s, FakeStruct) s.fields = fields - + def new_array_type(self, ptrtype, length): return FakeType('' % (ptrtype, length)) @@ -60,7 +60,7 @@ return ', '.join([str(y) + str(x) for x, y, z in self.fields]) class FakeLibrary(object): - + def load_function(self, BType, name): return FakeFunction(BType, name) @@ -70,11 +70,17 @@ self.BType = str(BType) self.name = name +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' def test_simple(): ffi = FFI(backend=FakeBackend()) ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin # should be a callable on real backends assert func.name == 'sin' assert func.BType == '), , False>' @@ -148,7 +154,7 @@ x, double/*several*//*comment*/y) /*on the same line*/ ; """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin assert func.name == 'sin' assert func.BType == ', ), , False>' diff --git a/testing/test_unicode_literals.py b/testing/test_unicode_literals.py --- a/testing/test_unicode_literals.py +++ b/testing/test_unicode_literals.py @@ -10,6 +10,13 @@ import sys, math from cffi import FFI +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' + def test_cast(): ffi = FFI() @@ -55,7 +62,7 @@ def test_dlopen(): ffi = FFI() ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") # unicode literal + m = ffi.dlopen(lib_m) # unicode literal x = m.sin(1.23) assert x == math.sin(1.23) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -4,7 +4,12 @@ from testing.support import * +lib_m = ['m'] if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = ['msvcrt'] pass # no obvious -Werror equivalent on MSVC else: if (sys.platform == 'darwin' and @@ -63,13 +68,13 @@ def test_simple_case(): ffi = FFI() ffi.cdef("double sin(double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) def test_rounding_1(): ffi = FFI() ffi.cdef("float sin(double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -77,7 +82,7 @@ def test_rounding_2(): ffi = FFI() ffi.cdef("double sin(float x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -103,7 +108,7 @@ def test_longdouble(): ffi = FFI() ffi.cdef("long double sinl(long double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) for input in [1.23, ffi.cast("double", 1.23), ffi.cast("long double", 1.23)]: diff --git a/testing/test_zdistutils.py b/testing/test_zdistutils.py --- a/testing/test_zdistutils.py +++ b/testing/test_zdistutils.py @@ -7,6 +7,13 @@ class DistUtilsTest(object): + def setup_class(self): + self.lib_m = "m" + 
if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + self.lib_m = 'msvcrt' def test_locate_engine_class(self): cls = _locate_engine_class(FFI(), self.generic) @@ -26,7 +33,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.write_source() with open(v.sourcefilename, 'r') as f: data = f.read() @@ -37,7 +44,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.sourcefilename = filename = str(udir.join('write_source.c')) v.write_source() assert filename == v.sourcefilename @@ -50,7 +57,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) try: from StringIO import StringIO except ImportError: @@ -64,7 +71,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.compile_module() assert v.get_module_name().startswith('_cffi_') if v.generates_python_module(): @@ -76,7 +83,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!2*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) basename = self.__class__.__name__ + 'test_compile_module' v.modulefilename = filename = str(udir.join(basename + '.so')) v.compile_module() @@ -93,7 +100,7 @@ ffi.cdef("%s sin(double x);" % csrc) v = Verifier(ffi, "#include ", force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) names.append(v.get_module_name()) assert names[0] == names[1] != names[2] @@ -111,7 +118,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!3*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -122,7 +129,7 @@ udir.join('test_verifier_args.h').write('#include \n') v = Verifier(ffi, csrc, include_dirs=[str(udir)], force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -131,7 +138,7 @@ ffi.cdef("double sin(double x);") csrc = "/*6%s*/\n#include " % self lib = ffi.verify(csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) assert isinstance(ffi.verifier, Verifier) with open(ffi.verifier.sourcefilename, 'r') as f: @@ -149,7 +156,7 @@ ''' lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')], force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) v = ffi.verifier ext = v.get_extension() @@ -163,7 +170,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there9!%s*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert not os.path.exists(v.sourcefilename) v.get_extension() assert os.path.exists(v.sourcefilename) From noreply at buildbot.pypy.org Sun Mar 2 17:05:13 2014 From: noreply at buildbot.pypy.org 
(mattip) Date: Sun, 2 Mar 2014 17:05:13 +0100 (CET) Subject: [pypy-commit] pypy default: update cffi, fix most win32 tests Message-ID: <20140302160513.44D411C3427@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69616:3fdf59785f44 Date: 2014-03-02 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/3fdf59785f44/ Log: update cffi, fix most win32 tests diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -897,11 +897,13 @@ if (c_api_object == NULL) return; if (!PyCapsule_CheckExact(c_api_object)) { + Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); return; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + Py_DECREF(c_api_object); } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -35,6 +35,12 @@ def getvalue(self): return self._value +lib_m = 'm' +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' class TestFunction(object): Backend = CTypesBackend @@ -44,18 +50,16 @@ ffi.cdef(""" double sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x == math.sin(1.23) def test_sinf(self): - if sys.platform == 'win32': - py.test.skip("no 'sinf'") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sinf(1.23) assert type(x) is float assert x != math.sin(1.23) # rounding effects @@ -67,14 +71,14 @@ ffi.cdef(""" void sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x is None def test_dlopen_filename(self): - path = ctypes.util.find_library("m") + path = ctypes.util.find_library(lib_m) if not path: - py.test.skip("libm not found") + py.test.skip("%s not found" % lib_m) ffi = FFI(backend=self.Backend()) ffi.cdef(""" double cos(double x); @@ -92,7 +96,7 @@ ffi.cdef(""" double cos(double x); """) - m = ffi.dlopen("m", ffi.RTLD_LAZY | ffi.RTLD_LOCAL) + m = ffi.dlopen(lib_m, ffi.RTLD_LAZY | ffi.RTLD_LOCAL) x = m.cos(1.23) assert x == math.cos(1.23) @@ -293,7 +297,7 @@ typedef double func_t(double); func_t sin; """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x == math.sin(1.23) @@ -356,7 +360,7 @@ ffi.cdef(""" int nonexistent(); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) assert not hasattr(m, 'nonexistent') def test_wraps_from_stdlib(self): @@ -370,7 +374,7 @@ def wrapper(*args): return f(*args) + 100 return wrapper - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) sin100 = my_decorator(m.sin) x = sin100(1.23) assert x == math.sin(1.23) + 100 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py @@ -37,7 +37,7 @@ totalsize=-1, totalalignment=-1, sflags=0): assert isinstance(s, FakeStruct) s.fields = fields - + def new_array_type(self, ptrtype, length): return FakeType('' % (ptrtype, length)) @@ -61,7 +61,7 @@ return ', '.join([str(y) + str(x) for x, y, z in 
self.fields]) class FakeLibrary(object): - + def load_function(self, BType, name): return FakeFunction(BType, name) @@ -71,11 +71,17 @@ self.BType = str(BType) self.name = name +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' def test_simple(): ffi = FFI(backend=FakeBackend()) ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin # should be a callable on real backends assert func.name == 'sin' assert func.BType == '), , False>' @@ -149,7 +155,7 @@ x, double/*several*//*comment*/y) /*on the same line*/ ; """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin assert func.name == 'sin' assert func.BType == ', ), , False>' diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py b/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py @@ -11,6 +11,13 @@ import sys, math from cffi import FFI +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' + def test_cast(): ffi = FFI() @@ -56,7 +63,7 @@ def test_dlopen(): ffi = FFI() ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") # unicode literal + m = ffi.dlopen(lib_m) # unicode literal x = m.sin(1.23) assert x == math.sin(1.23) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -5,7 +5,12 @@ from pypy.module.test_lib_pypy.cffi_tests.support import * +lib_m = ['m'] if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = ['msvcrt'] pass # no obvious -Werror equivalent on MSVC else: if (sys.platform == 'darwin' and @@ -64,13 +69,13 @@ def test_simple_case(): ffi = FFI() ffi.cdef("double sin(double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) def test_rounding_1(): ffi = FFI() ffi.cdef("float sin(double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -78,7 +83,7 @@ def test_rounding_2(): ffi = FFI() ffi.cdef("double sin(float x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -104,7 +109,7 @@ def test_longdouble(): ffi = FFI() ffi.cdef("long double sinl(long double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) for input in [1.23, ffi.cast("double", 1.23), ffi.cast("long double", 1.23)]: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py +++ 
b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py @@ -8,6 +8,13 @@ class DistUtilsTest(object): + def setup_class(self): + self.lib_m = "m" + if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + self.lib_m = 'msvcrt' def test_locate_engine_class(self): cls = _locate_engine_class(FFI(), self.generic) @@ -27,7 +34,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.write_source() with open(v.sourcefilename, 'r') as f: data = f.read() @@ -38,7 +45,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.sourcefilename = filename = str(udir.join('write_source.c')) v.write_source() assert filename == v.sourcefilename @@ -51,7 +58,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) try: from StringIO import StringIO except ImportError: @@ -65,7 +72,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.compile_module() assert v.get_module_name().startswith('_cffi_') if v.generates_python_module(): @@ -77,7 +84,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!2*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) basename = self.__class__.__name__ + 'test_compile_module' v.modulefilename = filename = str(udir.join(basename + '.so')) v.compile_module() @@ -94,7 +101,7 @@ ffi.cdef("%s sin(double x);" % csrc) v = Verifier(ffi, "#include ", force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) names.append(v.get_module_name()) assert names[0] == names[1] != names[2] @@ -112,7 +119,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!3*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -123,7 +130,7 @@ udir.join('test_verifier_args.h').write('#include \n') v = Verifier(ffi, csrc, include_dirs=[str(udir)], force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -132,7 +139,7 @@ ffi.cdef("double sin(double x);") csrc = "/*6%s*/\n#include " % self lib = ffi.verify(csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) assert isinstance(ffi.verifier, Verifier) with open(ffi.verifier.sourcefilename, 'r') as f: @@ -150,7 +157,7 @@ ''' lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')], force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) v = ffi.verifier ext = v.get_extension() @@ -164,7 +171,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there9!%s*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert not os.path.exists(v.sourcefilename) 
v.get_extension() assert os.path.exists(v.sourcefilename) From noreply at buildbot.pypy.org Sun Mar 2 17:09:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 17:09:53 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: small changes Message-ID: <20140302160953.6FAE71C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69617:82d9312d3319 Date: 2014-03-02 10:32 -0500 http://bitbucket.org/pypy/pypy/changeset/82d9312d3319/ Log: small changes diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -588,7 +588,7 @@ Modules visible from application programs are imported from interpreter or application level files. PyPy reuses almost all python -modules of CPython's standard library, currently from version 2.7.4. We +modules of CPython's standard library, currently from version 2.7.6. We sometimes need to `modify modules`_ and - more often - regression tests because they rely on implementation details of CPython. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1547,7 +1547,7 @@ self.wrap("fileno() returned a non-integer") ) fd = self.int_w(w_fd) - if fd < 0 or fd > 2147483647: + if fd < 0 or fd > INT_MAX: raise oefmt(self.w_ValueError, "file descriptor cannot be a negative integer (%d)", fd) diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -26,7 +26,7 @@ #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.4" +#define PY_VERSION "2.7.6" /* PyPy version as a string */ #define PYPY_VERSION "2.3.0-alpha0" diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -49,6 +49,7 @@ # we want to be compatible with cpython and also accept things # that can be casted to integer (I think) try: + # compute the integer w_timeout = space.int(w_timeout) except OperationError: raise oefmt(space.w_TypeError, diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -7,7 +7,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (2, 7, 4, "final", 42) +CPYTHON_VERSION = (2, 7, 6, "final", 42) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h From noreply at buildbot.pypy.org Sun Mar 2 17:09:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 17:09:54 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: clean up pwd module changes Message-ID: <20140302160954.A90741C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69618:beec1f9a3f9e Date: 2014-03-02 11:08 -0500 http://bitbucket.org/pypy/pypy/changeset/beec1f9a3f9e/ Log: clean up pwd module changes diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -5,22 +5,10 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.rarithmetic import intmask -import sys - - -if sys.maxint == 2147483647: - def check_uid_range(space, num): - pass -else: - def check_uid_range(space, 
num): - if num < -(1<<31) or num >= (1<<32): - msg = "getpwuid(): uid not found" - raise OperationError(space.w_KeyError, space.wrap(msg)) - eci = ExternalCompilationInfo( includes=['pwd.h'] - ) +) class CConfig: _compilation_info_ = eci @@ -77,15 +65,14 @@ uid = space.int_w(w_uid) except OperationError, e: if e.match(space, space.w_OverflowError): - msg = "getpwuid(): uid not found" - raise OperationError(space.w_KeyError, space.wrap(msg)) + raise oefmt(space.w_KeyError, "getpwuid(): uid not found") raise - check_uid_range(space, uid) pw = c_getpwuid(uid) if not pw: raise oefmt(space.w_KeyError, "getpwuid(): uid not found: %d", uid) return make_struct_passwd(space, pw) + @unwrap_spec(name=str) def getpwnam(space, name): """ @@ -99,6 +86,7 @@ raise oefmt(space.w_KeyError, "getpwnam(): name not found: %s", name) return make_struct_passwd(space, pw) + def getpwall(space): users_w = [] c_setpwent() diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -23,16 +23,21 @@ assert type(pw.pw_uid) is int assert type(pw.pw_gid) is int # should be out of uid_t range + raises(TypeError, pwd.getpwuid) + raises(TypeError, pwd.getpwuid, 3.14) + raises(KeyError, pwd.getpwuid, sys.maxint) + raises(KeyError, pwd.getpwuid, -1) raises(KeyError, pwd.getpwuid, 2**128) raises(KeyError, pwd.getpwuid, -2**128) - raises(KeyError, pwd.getpwuid, (1<<32)) - raises(KeyError, pwd.getpwuid, -(1<<32)) def test_getpwnam(self): import pwd + raises(TypeError, pwd.getpwnam) + raises(TypeError, pwd.getpwnam, 42) raises(KeyError, pwd.getpwnam, '~invalid~') assert pwd.getpwnam('root').pw_name == 'root' def test_getpwall(self): import pwd + raises(TypeError, pwd.getpwall, 42) assert pwd.getpwnam('root') in pwd.getpwall() From noreply at buildbot.pypy.org Sun Mar 2 18:06:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 18:06:54 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: correctness fixes for pwd module Message-ID: <20140302170654.0DEA51C1041@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69619:f99551340203 Date: 2014-03-02 12:03 -0500 http://bitbucket.org/pypy/pypy/changeset/f99551340203/ Log: correctness fixes for pwd module diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -3,32 +3,38 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, most_pos_value_of -eci = ExternalCompilationInfo( - includes=['pwd.h'] -) +eci = ExternalCompilationInfo(includes=['pwd.h']) class CConfig: _compilation_info_ = eci uid_t = rffi_platform.SimpleType("uid_t") + gid_t = rffi_platform.SimpleType("gid_t") + +config = rffi_platform.configure(CConfig) + +uid_t = config['uid_t'] +gid_t = config['gid_t'] + +class CConfig: + _compilation_info_ = eci passwd = rffi_platform.Struct( 'struct passwd', [('pw_name', rffi.CCHARP), ('pw_passwd', rffi.CCHARP), - ('pw_uid', rffi.INT), - ('pw_gid', rffi.INT), + ('pw_uid', uid_t), + ('pw_gid', gid_t), ('pw_gecos', rffi.CCHARP), ('pw_dir', rffi.CCHARP), - ('pw_shell', rffi.CCHARP), - ]) + ('pw_shell', rffi.CCHARP)]) config = rffi_platform.configure(CConfig) + passwd_p = lltype.Ptr(config['passwd']) -uid_t = config['uid_t'] def external(name, 
args, result, **kwargs): return rffi.llexternal(name, args, result, compilation_info=eci, **kwargs) @@ -63,10 +69,13 @@ """ try: uid = space.int_w(w_uid) + if uid < -1 or uid > most_pos_value_of(uid_t): + raise OperationError(space.w_OverflowError, None) except OperationError, e: if e.match(space, space.w_OverflowError): raise oefmt(space.w_KeyError, "getpwuid(): uid not found") raise + uid = rffi.cast(uid_t, uid) pw = c_getpwuid(uid) if not pw: raise oefmt(space.w_KeyError, "getpwuid(): uid not found: %d", uid) diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -8,7 +8,7 @@ spaceconfig = dict(usemodules=['pwd']) def test_getpwuid(self): - import pwd, sys + import pwd, sys, re raises(KeyError, pwd.getpwuid, -1) pw = pwd.getpwuid(0) assert pw.pw_name == 'root' @@ -22,13 +22,19 @@ assert pw.pw_shell.startswith('/') assert type(pw.pw_uid) is int assert type(pw.pw_gid) is int - # should be out of uid_t range raises(TypeError, pwd.getpwuid) raises(TypeError, pwd.getpwuid, 3.14) raises(KeyError, pwd.getpwuid, sys.maxint) - raises(KeyError, pwd.getpwuid, -1) - raises(KeyError, pwd.getpwuid, 2**128) - raises(KeyError, pwd.getpwuid, -2**128) + # -1 is allowed, cast to uid_t + exc = raises(KeyError, pwd.getpwuid, -1) + m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) + assert m + maxval = int(m.group(1)) + assert maxval >= 2**32 - 1 + # should be out of uid_t range + for v in [-2, maxval+1, 2**128, -2**128]: + exc = raises(KeyError, pwd.getpwuid, v) + assert exc.value[0] == 'getpwuid(): uid not found' def test_getpwnam(self): import pwd From noreply at buildbot.pypy.org Sun Mar 2 18:37:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 18:37:35 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix translation Message-ID: <20140302173735.EFBCC1C1041@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69620:254623254a96 Date: 2014-03-02 12:36 -0500 http://bitbucket.org/pypy/pypy/changeset/254623254a96/ Log: fix translation diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from rpython.rlib.rarithmetic import intmask, most_pos_value_of +from rpython.rlib.rarithmetic import intmask, most_pos_value_of, widen eci = ExternalCompilationInfo(includes=['pwd.h']) @@ -78,7 +78,7 @@ uid = rffi.cast(uid_t, uid) pw = c_getpwuid(uid) if not pw: - raise oefmt(space.w_KeyError, "getpwuid(): uid not found: %d", uid) + raise oefmt(space.w_KeyError, "getpwuid(): uid not found: %d", widen(uid)) return make_struct_passwd(space, pw) From noreply at buildbot.pypy.org Sun Mar 2 18:43:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 18:43:51 +0100 (CET) Subject: [pypy-commit] stmgc default: Add "gil-c7", a small file that presents the same API to programs Message-ID: <20140302174351.DA8321C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r925:96659ea5511f Date: 2014-03-02 18:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/96659ea5511f/ Log: Add "gil-c7", a small file that presents the same API to programs but is implemented with a GIL. It contains the minor collector but no major collector. 
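The idea described in this log message can be summarised with a rough Python analogue (illustration only, not part of the commit; the real implementation added below is C): the transactional API is kept, but starting and committing a transaction simply acquire and release one global lock, so at most one "transaction" ever runs at a time.

    import threading

    _gil = threading.Lock()

    def stm_start_inevitable_transaction(tl):
        _gil.acquire()          # only one transaction at a time

    def stm_commit_transaction():
        _gil.release()

    def stm_become_inevitable(msg):
        pass                    # already inevitable: we hold the only lock

    def stm_read(obj):
        pass                    # no read barrier is needed under a GIL

    def stm_write(obj):
        pass                    # (the real code below still keeps a write
                                #  barrier, but only to feed the minor GC)

This matches the inline functions added to gil-c7/stmgc.h below, where stm_start_inevitable_transaction() is a pthread_mutex_lock(&_stm_gil) and stm_commit_transaction() is the matching unlock, while the GCFLAG_WRITE_BARRIER machinery survives solely so that the minor collector can find old objects pointing into the nursery.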
diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -16,5 +16,14 @@ duhton_debug: *.c *.h $(C7SOURCES) $(C7HEADERS) clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug *.c ../c7/stmgc.c -Wall + +duhton_nostm: *.c *.h ../gil-c7/stmgc.? + clang -pthread -g -DNDEBUG -O2 -o duhton_nostm *.c ../gil-c7/stmgc.c -Wall -DUSE_GIL + + +duhton_debug_nostm: *.c *.h ../gil-c7/stmgc.? + clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug_nostm *.c ../gil-c7/stmgc.c -Wall -DUSE_GIL -ferror-limit=1 + + clean: rm -f duhton duhton_debug duhton_release diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -1,14 +1,18 @@ #ifndef _DUHTON_H_ #define _DUHTON_H_ +/* #undef USE_GIL */ /* forces "gil-c7" instead of "c7" */ + #include #include #include -#include "../c7/stmgc.h" +#ifdef USE_GIL +# include "../gil-c7/stmgc.h" +#else +# include "../c7/stmgc.h" +#endif -#define STM 1 /* hackish removal of all read/write - barriers. synchronization is up to - the program */ + #define DEFAULT_NUM_THREADS 2 extern __thread stm_thread_local_t stm_thread_local; @@ -179,17 +183,8 @@ p1 = (typeof(p1))_pop_root()) -#if STM #define _du_read1(p1) stm_read((object_t *)(p1)) #define _du_write1(p1) stm_write((object_t *)(p1)) -#else -#define _du_read1(p1) -#define _du_write1(p1) { \ - if (UNLIKELY(((object_t *)(p1))->stm_flags & GCFLAG_WRITE_BARRIER)) { \ - LIST_APPEND(_STM_TL->old_objects_to_trace, ((object_t *)(p1))); \ - ((object_t *)(p1))->stm_flags &= ~GCFLAG_WRITE_BARRIER; \ - }} -#endif #ifndef NDEBUG diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c new file mode 100644 --- /dev/null +++ b/gil-c7/stmgc.c @@ -0,0 +1,280 @@ +#include "stmgc.h" +#include +#include + +pthread_mutex_t _stm_gil = PTHREAD_MUTEX_INITIALIZER; +stm_thread_local_t *_stm_tloc; + + +/************************************************************/ + +struct list_s { + uintptr_t count; + uintptr_t last_allocated; + uintptr_t items[]; +}; + +static struct list_s *list_create(void); + +static inline void list_free(struct list_s *lst) +{ + free(lst); +} + +#define LIST_FREE(lst) (list_free(lst), (lst) = NULL) + + +static struct list_s *_list_grow(struct list_s *, uintptr_t); + +static inline struct list_s *list_append(struct list_s *lst, uintptr_t item) +{ + uintptr_t index = lst->count++; + if (UNLIKELY(index > lst->last_allocated)) + lst = _list_grow(lst, index); + lst->items[index] = item; + return lst; +} + +#define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) + + +__attribute__((unused)) +static inline void list_clear(struct list_s *lst) +{ + lst->count = 0; +} + +static inline bool list_is_empty(struct list_s *lst) +{ + return (lst->count == 0); +} + +__attribute__((unused)) +static inline uintptr_t list_count(struct list_s *lst) +{ + return lst->count; +} + +static inline uintptr_t list_pop_item(struct list_s *lst) +{ + assert(lst->count > 0); + return lst->items[--lst->count]; +} + +__attribute__((unused)) +static inline uintptr_t list_item(struct list_s *lst, uintptr_t index) +{ + return lst->items[index]; +} + +__attribute__((unused)) +static inline void list_set_item(struct list_s *lst, uintptr_t index, + uintptr_t newitem) +{ + lst->items[index] = newitem; +} + +#define LIST_FOREACH_R(lst, TYPE, CODE) \ + do { \ + struct list_s *_lst = (lst); \ + uintptr_t _i; \ + for (_i = _lst->count; _i--; ) { \ + TYPE item = (TYPE)_lst->items[_i]; \ + CODE; \ + } \ + } while (0) + +#define LIST_SETSIZE(n) (sizeof(struct list_s) + LIST_ITEMSSIZE(n)) 
+#define LIST_ITEMSSIZE(n) ((n) * sizeof(uintptr_t)) +#define LIST_OVERCNT(n) (33 + ((((n) / 2) * 3) | 1)) + +static struct list_s *list_create(void) +{ + uintptr_t initial_allocation = 32; + struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); + if (lst == NULL) + abort(); + + lst->count = 0; + lst->last_allocated = initial_allocation - 1; + return lst; +} + +static struct list_s *_list_grow(struct list_s *lst, uintptr_t nalloc) +{ + nalloc = LIST_OVERCNT(nalloc); + lst = realloc(lst, LIST_SETSIZE(nalloc)); + if (lst == NULL) + abort(); + + lst->last_allocated = nalloc - 1; + return lst; +} + +/************************************************************/ + +#define GCFLAG_WRITE_BARRIER _STM_GCFLAG_WRITE_BARRIER + +static struct list_s *objects_pointing_to_nursery; + +void stm_setup(void) +{ + objects_pointing_to_nursery = list_create(); +} + +void stm_teardown(void) +{ + list_free(objects_pointing_to_nursery); +} + +void _stm_write_slowpath(object_t *obj) +{ + obj->gil_flags &= ~GCFLAG_WRITE_BARRIER; + LIST_APPEND(objects_pointing_to_nursery, obj); +} + +object_t *_stm_allocate_old(ssize_t size) +{ + char *p = malloc(size); + assert(p); + memset(p, 0, size); + ((object_t *)p)->gil_flags = STM_FLAGS_PREBUILT; + return (object_t *)p; +} + +object_t *_stm_allocate_external(ssize_t size) +{ + char *p = malloc(size); + assert(p); + memset(p, 0, size); + _stm_write_slowpath((object_t *)p); + return (object_t *)p; +} + +/************************************************************/ + + +#define NB_NURSERY_PAGES 1024 // 4MB +#define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) + +char *_stm_nursery_base = NULL; +char *_stm_nursery_current = NULL; +char *_stm_nursery_end = NULL; + +static bool _is_in_nursery(object_t *obj) +{ + return ((char *)obj >= _stm_nursery_base && + (char *)obj < _stm_nursery_end); +} + +#define GCWORD_MOVED ((object_t *) -42) + +static void minor_trace_if_young(object_t **pobj) +{ + object_t *obj = *pobj; + object_t *nobj; + + if (obj == NULL) + return; + + if (_is_in_nursery(obj)) { + /* If the object was already seen here, its first word was set + to GCWORD_MOVED. In that case, the forwarding location, i.e. + where the object moved to, is stored in the second word in 'obj'. */ + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + + if (pforwarded_array[0] == GCWORD_MOVED) { + *pobj = pforwarded_array[1]; /* already moved */ + return; + } + + /* We need to make a copy of this object. + */ + size_t size = stmcb_size_rounded_up(obj); + + nobj = malloc(size); + assert(nobj); + + /* Copy the object */ + memcpy(nobj, obj, size); + + /* Done copying the object. */ + //dprintf(("\t\t\t\t\t%p -> %p\n", obj, nobj)); + pforwarded_array[0] = GCWORD_MOVED; + pforwarded_array[1] = nobj; + *pobj = nobj; + } + + else { + /* The object was not in the nursery at all */ + return; + } + + /* Must trace the object later */ + LIST_APPEND(objects_pointing_to_nursery, nobj); +} + +static void collect_roots_in_nursery(void) +{ + object_t **current = _stm_tloc->shadowstack; + object_t **base = _stm_tloc->shadowstack_base; + while (current-- != base) { + minor_trace_if_young(current); + } + minor_trace_if_young(&_stm_tloc->thread_local_obj); +} + +static inline void _collect_now(object_t *obj) +{ + assert(!_is_in_nursery(obj)); + + /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. 
*/ + assert(!(obj->gil_flags & GCFLAG_WRITE_BARRIER)); + obj->gil_flags |= GCFLAG_WRITE_BARRIER; + + /* Trace the 'obj' to replace pointers to nursery with pointers + outside the nursery, possibly forcing nursery objects out and + adding them to 'objects_pointing_to_nursery' as well. */ + stmcb_trace(obj, &minor_trace_if_young); +} + +static void collect_oldrefs_to_nursery(void) +{ + struct list_s *lst = objects_pointing_to_nursery; + + while (!list_is_empty(lst)) { + object_t *obj = (object_t *)list_pop_item(lst); + + _collect_now(obj); + + /* the list could have moved while appending */ + lst = objects_pointing_to_nursery; + } +} + +static void throw_away_nursery(void) +{ + if (_stm_nursery_base == NULL) { + _stm_nursery_base = malloc(NURSERY_SIZE); + assert(_stm_nursery_base); + _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; + } + + _stm_nursery_current = _stm_nursery_base; + memset(_stm_nursery_base, 0, NURSERY_SIZE); +} + +object_t *_stm_allocate_slowpath(ssize_t size_rounded_up) +{ + /* run minor collection */ + //fprintf(stderr, "minor collect\n"); + collect_roots_in_nursery(); + collect_oldrefs_to_nursery(); + throw_away_nursery(); + + char *p = _stm_nursery_current; + char *end = p + size_rounded_up; + assert(end <= _stm_nursery_end); + _stm_nursery_current = end; + return (object_t *)p; +} diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h new file mode 100644 --- /dev/null +++ b/gil-c7/stmgc.h @@ -0,0 +1,105 @@ +#include +#include +#include +#include +#include +#include +#include + +#define TLPREFIX /* nothing */ + + +typedef struct { /* empty */ } stm_jmpbuf_t; + +typedef struct object_s { + uint32_t gil_flags; +} object_t; + +typedef struct { + object_t **shadowstack; + object_t **shadowstack_base; + object_t *thread_local_obj; +} stm_thread_local_t; + +extern stm_thread_local_t *_stm_tloc; +extern char *_stm_nursery_current, *_stm_nursery_end; + +#ifdef NDEBUG +#define OPT_ASSERT(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) +#else +#define OPT_ASSERT(cond) assert(cond) +#endif +#define UNLIKELY(x) __builtin_expect(x, false) + +#define _STM_GCFLAG_WRITE_BARRIER 0x01 +#define _STM_FAST_ALLOC (66*1024) +#define STM_FLAGS_PREBUILT _STM_GCFLAG_WRITE_BARRIER + + +object_t *_stm_allocate_old(ssize_t size); + +object_t *_stm_allocate_external(ssize_t); +object_t *_stm_allocate_slowpath(ssize_t); + +inline static object_t *stm_allocate(ssize_t size_rounded_up) { + OPT_ASSERT(size_rounded_up >= 16); + OPT_ASSERT((size_rounded_up & 7) == 0); + + if (UNLIKELY(size_rounded_up >= _STM_FAST_ALLOC)) + return _stm_allocate_external(size_rounded_up); + + char *p = _stm_nursery_current; + char *end = p + size_rounded_up; + _stm_nursery_current = end; + if (UNLIKELY(end > _stm_nursery_end)) + return _stm_allocate_slowpath(size_rounded_up); + + return (object_t *)p; +} + +inline static void stm_register_thread_local(stm_thread_local_t *tl) { + tl->thread_local_obj = NULL; + tl->shadowstack_base = (object_t **)malloc(768*1024); + assert(tl->shadowstack_base); + tl->shadowstack = tl->shadowstack_base; +} +inline static void stm_unregister_thread_local(stm_thread_local_t *tl) { + free(tl->shadowstack_base); +} + +extern pthread_mutex_t _stm_gil; + +void stm_setup(void); +void stm_teardown(void); + +inline static void stm_start_inevitable_transaction(stm_thread_local_t *tl) { + if (pthread_mutex_lock(&_stm_gil) != 0) abort(); + _stm_tloc = tl; +} +inline static void stm_commit_transaction(void) { + _stm_tloc = NULL; + if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); +} +inline 
static void stm_become_inevitable(const char *msg) { } +inline static void stm_read(object_t *ob) { } + +void _stm_write_slowpath(object_t *); + +inline static void stm_write(object_t *ob) { + if (UNLIKELY(ob->gil_flags & _STM_GCFLAG_WRITE_BARRIER)) + _stm_write_slowpath(ob); +} + +inline static char *_stm_real_address(object_t *ob) { return (char *)ob; } + +#define STM_START_TRANSACTION(tl, here) do { \ + (void)&(here); \ + stm_start_inevitable_transaction(tl); \ +} while (0) + +#define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) +#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) + + +extern ssize_t stmcb_size_rounded_up(struct object_s *); +extern void stmcb_trace(struct object_s *, void (object_t **)); From noreply at buildbot.pypy.org Sun Mar 2 19:08:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 19:08:31 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix zlib decompress flush Message-ID: <20140302180831.6F7771C0865@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69621:a7c8beafaa1b Date: 2014-03-02 13:06 -0500 http://bitbucket.org/pypy/pypy/changeset/a7c8beafaa1b/ Log: fix zlib decompress flush diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -157,7 +157,6 @@ rzlib.deflateEnd(self.stream) self.stream = rzlib.null_stream - @unwrap_spec(data='bufferstr') def compress(self, data): """ @@ -181,7 +180,6 @@ raise zlib_error(self.space, e.msg) return self.space.wrap(result) - @unwrap_spec(mode=int) def flush(self, mode=rzlib.Z_FINISH): """ @@ -270,6 +268,15 @@ rzlib.inflateEnd(self.stream) self.stream = rzlib.null_stream + def _save_unconsumed_input(self, data, finished, unused_len): + unused_start = len(data) - unused_len + assert unused_start >= 0 + tail = data[unused_start:] + if finished: + self.unconsumed_tail = '' + self.unused_data += tail + else: + self.unconsumed_tail = tail @unwrap_spec(data='bufferstr', max_length=int) def decompress(self, data, max_length=0): @@ -290,41 +297,39 @@ try: self.lock() try: - result = rzlib.decompress(self.stream, data, - max_length = max_length) + result = rzlib.decompress(self.stream, data, max_length=max_length) finally: self.unlock() except rzlib.RZlibError, e: raise zlib_error(self.space, e.msg) string, finished, unused_len = result - unused_start = len(data) - unused_len - assert unused_start >= 0 - tail = data[unused_start:] - if finished: - self.unconsumed_tail = '' - self.unused_data += tail - else: - self.unconsumed_tail = tail + self._save_unconsumed_input(data, finished, unused_len) return self.space.wrap(string) - - @unwrap_spec(length=int) - def flush(self, length=sys.maxint): + @unwrap_spec(mode="c_int") + def flush(self, mode=rzlib.Z_FINISH): """ flush( [length] ) -- This is kept for backward compatibility, because each call to decompress() immediately returns as much data as possible. """ - if length <= 0: - raise OperationError(self.space.w_ValueError, self.space.wrap( - "length must be greater than zero")) - # We could call rzlib.decompress(self.stream, '', rzlib.Z_FINISH) - # which would complain if the input stream so far is not complete; - # however CPython's zlib module does not behave like that. - # I could not figure out a case in which flush() in CPython - # doesn't simply return an empty string without complaining. 
- return self.space.wrap("") + if mode == rzlib.Z_NO_FLUSH: + return space.wrap("") + + data = self.unconsumed_tail + try: + self.lock() + try: + result = rzlib.decompress(self.stream, data, mode) + finally: + self.unlock() + except rzlib.RZlibError, e: + raise zlib_error(self.space, e.msg) + + string, finished, unused_len = result + self._save_unconsumed_input(data, finished, unused_len) + return self.space.wrap(string) @unwrap_spec(wbits=int) diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -1,4 +1,3 @@ - """ Tests for the zlib module. """ @@ -12,7 +11,7 @@ from pypy.module.zlib import interp_zlib except ImportError: import py; py.test.skip("no zlib C library on this machine") - + def test_unsigned_to_signed_32bit(): assert interp_zlib.unsigned_to_signed_32bit(123) == 123 assert interp_zlib.unsigned_to_signed_32bit(2**31) == -2**31 @@ -52,7 +51,6 @@ assert self.zlib.crc32('\0') == -771559539 assert self.zlib.crc32('hello, world.') == -936931198 - def test_crc32_start_value(self): """ When called with a string and an integer, zlib.crc32 should compute the @@ -94,7 +92,6 @@ assert self.zlib.adler32('hello, world.') == 571147447 assert self.zlib.adler32('x' * 23) == -2122904887 - def test_adler32_start_value(self): """ When called with a string and an integer, zlib.adler32 should compute @@ -114,7 +111,6 @@ assert self.zlib.adler32('foo', -1) == 45547858 assert self.zlib.adler32('foo', 99999999999999999999999) == -114818734 - def test_invalidLevel(self): """ zlib.compressobj should raise ValueError when an out of bounds level is @@ -123,7 +119,6 @@ raises(ValueError, self.zlib.compressobj, -2) raises(ValueError, self.zlib.compressobj, 10) - def test_compression(self): """ zlib.compressobj should return an object which can be used to compress @@ -134,7 +129,6 @@ bytes += compressor.flush() assert bytes == self.compressed - def test_decompression(self): """ zlib.decompressobj should return an object which can be used to @@ -145,7 +139,6 @@ bytes += decompressor.flush() assert bytes == self.expanded - def test_compress(self): """ Test the zlib.compress() function. @@ -153,7 +146,6 @@ bytes = self.zlib.compress(self.expanded) assert bytes == self.compressed - def test_decompress(self): """ Test the zlib.decompress() function. @@ -161,7 +153,6 @@ bytes = self.zlib.decompress(self.compressed) assert bytes == self.expanded - def test_decompress_invalid_input(self): """ Try to feed garbage to zlib.decompress(). @@ -169,7 +160,6 @@ raises(self.zlib.error, self.zlib.decompress, self.compressed[:-2]) raises(self.zlib.error, self.zlib.decompress, 'foobar') - def test_unused_data(self): """ Try to feed too much data to zlib.decompress(). @@ -179,6 +169,8 @@ s = d.decompress(self.compressed + 'extrastuff') assert s == self.expanded assert d.unused_data == 'extrastuff' + assert d.flush() == '' + assert d.unused_data == 'extrastuff' # try again with several decompression steps d = self.zlib.decompressobj() s1 = d.decompress(self.compressed[:10]) @@ -192,7 +184,6 @@ assert d.unused_data == ('spam' * 100) + ('egg' * 50) assert s4 == '' - def test_max_length(self): """ Test the max_length argument of the decompress() method @@ -206,7 +197,6 @@ data = d.unconsumed_tail assert not data - def test_buffer(self): """ We should be able to pass buffer objects instead of strings. 
@@ -229,3 +219,17 @@ bytes = self.zlib.decompress(buffer(self.compressed)) assert bytes == self.expanded + + def test_flush_with_freed_input(self): + # Issue #16411: decompressor accesses input to last decompress() call + # in flush(), even if this object has been freed in the meanwhile. + input1 = 'abcdefghijklmnopqrstuvwxyz' + input2 = 'QWERTYUIOPASDFGHJKLZXCVBNM' + data = self.zlib.compress(input1) + dco = self.zlib.decompressobj() + dco.decompress(data, 1) + del data + data = self.zlib.compress(input2) + assert dco.flush() == input1[1:] + assert dco.unused_data == '' + assert dco.unconsumed_tail == '' From noreply at buildbot.pypy.org Sun Mar 2 19:32:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 2 Mar 2014 19:32:18 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: random fixes Message-ID: <20140302183218.72E9F1C0865@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69622:35b0897aa1df Date: 2014-03-02 13:29 -0500 http://bitbucket.org/pypy/pypy/changeset/35b0897aa1df/ Log: random fixes diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -69,7 +69,7 @@ """ try: uid = space.int_w(w_uid) - if uid < -1 or uid > most_pos_value_of(uid_t): + if uid < -1 or uid > widen(most_pos_value_of(uid_t)): raise OperationError(space.w_OverflowError, None) except OperationError, e: if e.match(space, space.w_OverflowError): diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -180,7 +180,7 @@ raise zlib_error(self.space, e.msg) return self.space.wrap(result) - @unwrap_spec(mode=int) + @unwrap_spec(mode="c_int") def flush(self, mode=rzlib.Z_FINISH): """ flush( [mode] ) -- Return a string containing any remaining compressed @@ -315,7 +315,7 @@ data as possible. 
""" if mode == rzlib.Z_NO_FLUSH: - return space.wrap("") + return self.space.wrap("") data = self.unconsumed_tail try: diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -126,6 +126,7 @@ """ compressor = self.zlib.compressobj() bytes = compressor.compress(self.expanded) + raises(OverflowError, compressor.flush, 2**31) bytes += compressor.flush() assert bytes == self.compressed @@ -136,6 +137,7 @@ """ decompressor = self.zlib.decompressobj() bytes = decompressor.decompress(self.compressed) + raises(OverflowError, decompressor.flush, 2**31) bytes += decompressor.flush() assert bytes == self.expanded From noreply at buildbot.pypy.org Sun Mar 2 19:46:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 19:46:57 +0100 (CET) Subject: [pypy-commit] stmgc default: Start stm_setup_prebuilt() Message-ID: <20140302184657.439EA1C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r926:cd8fc6f649af Date: 2014-03-02 19:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/cd8fc6f649af/ Log: Start stm_setup_prebuilt() diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -101,12 +101,12 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up) { - /* only for tests */ + /* only for tests xxx but stm_setup_prebuilt() uses this now too */ char *p = allocate_outside_nursery_large(size_rounded_up); memset(p, 0, size_rounded_up); object_t *o = (object_t *)(p - stm_object_pages); - o->stm_flags = STM_FLAGS_PREBUILT; + o->stm_flags = GCFLAG_WRITE_BARRIER; if (testing_prebuilt_objs == NULL) testing_prebuilt_objs = list_create(); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -96,7 +96,7 @@ char *allocated = allocate_outside_nursery_large(size); nobj = (object_t *)(allocated - stm_object_pages); - /* Copy the object */ + /* Copy the object */ char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); memcpy(realnobj, realobj, size); diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -3,80 +3,36 @@ #endif -static uint64_t prebuilt_readmarkers_start = 0; -static uint64_t prebuilt_readmarkers_end = 0; -static uint64_t prebuilt_objects_start = 0; +#define GCWORD_PREBUILT_MOVED ((object_t *) 42) -/* XXX NOT TESTED, AND NOT WORKING RIGHT NOW */ +object_t *stm_setup_prebuilt(object_t *staticobj_invalid) +{ + /* All variable names in "_invalid" here mean that although the + type is really "object_t *", it should not actually be accessed + via %gs. -void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size) -{ - /* Initialize a region of 'size' bytes at the 'target' address, - containing prebuilt objects copied from 'source'. The caller - must ensure that the 'target' address is valid. It might be - called several times but care must be taken not to overlap the - ranges. The exact rules are a bit complicated: + If the object was already moved, its first word was set to + GCWORD_PREBUILT_MOVED. In that case, the forwarding location, + i.e. where the object moved to, is stored in the second word. 
+ */ + uintptr_t objaddr = (uintptr_t)staticobj_invalid; + struct object_s *obj = (struct object_s *)objaddr; + object_t **pforwarded_array = (object_t **)objaddr; - - the range [target, target + size] must be inside the - range [131072, FIRST_READMARKER_PAGE*4096] - - - the range [target / 16, (target + size) / 16] will be - used by read markers, so it must be fully before the - range [target, target + size]. - - The objects themselves can contain more pointers to other - prebuilt objects. Their stm_flags field must be initialized - with STM_FLAGS_PREBUILT. - */ - - uint64_t utarget = (uint64_t)target; - uint64_t rm_start = utarget / 16; - uint64_t rm_end = (utarget + size + 15) / 16; - - if (rm_start < 8192 || rm_end > (utarget & ~4095) || - utarget + size > FIRST_READMARKER_PAGE * 4096UL) { - fprintf(stderr, - "stm_copy_prebuilt_objects: invalid range (0x%lx, 0x%lx)\n", - (long)utarget, (long)size); - abort(); + if (pforwarded_array[0] == GCWORD_PREBUILT_MOVED) { + return pforwarded_array[1]; /* already moved */ } - if (prebuilt_readmarkers_start == 0) { - prebuilt_readmarkers_start = rm_start; - prebuilt_readmarkers_end = rm_end; - prebuilt_objects_start = utarget & ~4095; - } - else { - if (prebuilt_readmarkers_start > rm_start) - prebuilt_readmarkers_start = rm_start; - if (prebuilt_readmarkers_end < rm_end) - prebuilt_readmarkers_end = rm_end; - if (prebuilt_objects_start > (utarget & ~4095)) - prebuilt_objects_start = utarget & ~4095; + /* We need to make a copy of this object. */ + size_t size = stmcb_size_rounded_up(obj); + object_t *nobj = _stm_allocate_old(size); - if (prebuilt_readmarkers_end > prebuilt_objects_start) { - fprintf(stderr, - "stm_copy_prebuilt_objects: read markers ending at 0x%lx " - "overlap with prebuilt objects starting at 0x%lx\n", - (long)prebuilt_readmarkers_end, - (long)prebuilt_objects_start); - abort(); - } - } + /* Copy the object */ + char *realnobj = REAL_ADDRESS(stm_object_pages, nobj); + memcpy(realnobj, (char *)objaddr, size); - uint64_t start_page = utarget / 4096; - uint64_t end_page = (utarget + size + 4095) / 4096; - pages_initialize_shared(start_page, end_page - start_page); + // XXX REFERENCES HERE - char *segment_base = get_segment_base(0); - memcpy(REAL_ADDRESS(segment_base, utarget), source, size); + return nobj; } - -#if 0 -static void reset_transaction_read_version_prebuilt(void) -{ - memset(REAL_ADDRESS(STM_SEGMENT->segment_base, prebuilt_readmarkers_start), - 0, prebuilt_readmarkers_end - prebuilt_readmarkers_start); -} -#endif diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -100,7 +100,6 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_NSE_SIGNAL_MAX 1 #define _STM_FAST_ALLOC (66*1024) -#define STM_FLAGS_PREBUILT _STM_GCFLAG_WRITE_BARRIER /* ==================== HELPERS ==================== */ @@ -253,6 +252,15 @@ /* Forces a collection. */ void stm_collect(long level); +/* Prepare an immortal "prebuilt" object managed by the GC. Takes a + pointer to an 'object_t', which should not actually be a GC-managed + structure but a real static structure. Returns the equivalent + GC-managed pointer. Works by copying it into the GC pages, following + and fixing all pointers it contains, by doing stm_setup_prebuilt() on + each of them recursively. (Note that this will leave garbage in the + static structure, but it should never be used anyway.) 
*/ +object_t *stm_setup_prebuilt(object_t *); + /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -27,7 +27,7 @@ void stm_teardown(void); void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); -//void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size); +object_t *stm_setup_prebuilt(object_t *); bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -33,7 +33,6 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_FAST_ALLOC (66*1024) -#define STM_FLAGS_PREBUILT _STM_GCFLAG_WRITE_BARRIER object_t *_stm_allocate_old(ssize_t size); From noreply at buildbot.pypy.org Sun Mar 2 20:03:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 20:03:13 +0100 (CET) Subject: [pypy-commit] stmgc default: Finish prebuilt.c, according to its test (which I forgot to check-in earlier) Message-ID: <20140302190313.B43C21C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r927:d8c4f5b49016 Date: 2014-03-02 20:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/d8c4f5b49016/ Log: Finish prebuilt.c, according to its test (which I forgot to check-in earlier) diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -536,11 +536,10 @@ major_clear_write_locks(); /* marking */ - mark_objects_to_trace = list_create(); + LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); mark_visit_from_roots(); - list_free(mark_objects_to_trace); - mark_objects_to_trace = NULL; + LIST_FREE(mark_objects_to_trace); /* cleanup */ clean_up_segment_lists(); diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -15,6 +15,7 @@ free(lst); } +#define LIST_CREATE(lst) ((lst) = list_create()) #define LIST_FREE(lst) (list_free(lst), (lst) = NULL) diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -5,23 +5,26 @@ #define GCWORD_PREBUILT_MOVED ((object_t *) 42) +static struct list_s *prebuilt_objects_to_trace; -object_t *stm_setup_prebuilt(object_t *staticobj_invalid) + +static void prebuilt_trace(object_t **pstaticobj_invalid) { - /* All variable names in "_invalid" here mean that although the - type is really "object_t *", it should not actually be accessed - via %gs. + uintptr_t objaddr = (uintptr_t)*pstaticobj_invalid; + struct object_s *obj = (struct object_s *)objaddr; - If the object was already moved, its first word was set to + if (obj == NULL) + return; + + /* If the object was already moved, its first word was set to GCWORD_PREBUILT_MOVED. In that case, the forwarding location, i.e. where the object moved to, is stored in the second word. */ - uintptr_t objaddr = (uintptr_t)staticobj_invalid; - struct object_s *obj = (struct object_s *)objaddr; object_t **pforwarded_array = (object_t **)objaddr; if (pforwarded_array[0] == GCWORD_PREBUILT_MOVED) { - return pforwarded_array[1]; /* already moved */ + *pstaticobj_invalid = pforwarded_array[1]; /* already moved */ + return; } /* We need to make a copy of this object. 
*/ @@ -32,7 +35,33 @@ char *realnobj = REAL_ADDRESS(stm_object_pages, nobj); memcpy(realnobj, (char *)objaddr, size); - // XXX REFERENCES HERE + /* Mark the original object */ + pforwarded_array[0] = GCWORD_PREBUILT_MOVED; + pforwarded_array[1] = nobj; - return nobj; + /* Done */ + *pstaticobj_invalid = nobj; + LIST_APPEND(prebuilt_objects_to_trace, realnobj); } + +object_t *stm_setup_prebuilt(object_t *staticobj_invalid) +{ + /* All variable names in "_invalid" here mean that although the + type is really "object_t *", it should not actually be accessed + via %gs. + */ + LIST_CREATE(prebuilt_objects_to_trace); + + object_t *obj = staticobj_invalid; + prebuilt_trace(&obj); + + while (!list_is_empty(prebuilt_objects_to_trace)) { + struct object_s *realobj1 = + (struct object_s *)list_pop_item(prebuilt_objects_to_trace); + stmcb_trace(realobj1, &prebuilt_trace); + } + + LIST_FREE(prebuilt_objects_to_trace); + + return obj; +} diff --git a/c7/test/test_prebuilt.py b/c7/test/test_prebuilt.py new file mode 100644 --- /dev/null +++ b/c7/test/test_prebuilt.py @@ -0,0 +1,63 @@ +from support import * +import py +import weakref + + +prebuilt_dict = weakref.WeakKeyDictionary() + +def _prebuilt(size, tid): + assert size >= 16 + assert (size & 7) == 0 + myobj1 = ffi.new("char[]", size) + myobj = ffi.cast("object_t *", myobj1) + prebuilt_dict[myobj] = myobj1 + ffi.cast("uint32_t *", myobj)[1] = tid + return myobj + +def prebuilt(size): + return _prebuilt(size, 42 + size) + +def prebuilt_refs(n): + return _prebuilt(HDR + n * WORD, 421420 + n) + + +class TestPrebuilt(BaseTest): + + def test_simple_prebuilt(self): + static1 = prebuilt(16) + ffi.cast("char *", static1)[8:11] = 'ABC' + print static1 + lp = lib.stm_setup_prebuilt(static1) + # + self.start_transaction() + assert stm_get_char(lp, 8) == 'A' + assert stm_get_char(lp, 9) == 'B' + assert stm_get_char(lp, 10) == 'C' + + def test_prebuilt_rec(self): + static1 = prebuilt_refs(2) + static2 = prebuilt(16) + ffi.cast("char *", static2)[8:11] = 'ABC' + ffi.cast("object_t **", static1)[1] = static2 + lp1 = lib.stm_setup_prebuilt(static1) + # + self.start_transaction() + assert not stm_get_ref(lp1, 1) + lp2 = stm_get_ref(lp1, 0) + print lp2 + assert stm_get_char(lp2, 8) == 'A' + assert stm_get_char(lp2, 9) == 'B' + assert stm_get_char(lp2, 10) == 'C' + + def test_prebuilt_rec_cycle(self): + static1 = prebuilt_refs(1) + static2 = prebuilt_refs(1) + ffi.cast("object_t **", static1)[1] = static2 + ffi.cast("object_t **", static2)[1] = static1 + lp1 = lib.stm_setup_prebuilt(static1) + # + self.start_transaction() + lp2 = stm_get_ref(lp1, 0) + print lp2 + assert lp2 != lp1 + assert stm_get_ref(lp2, 0) == lp1 From noreply at buildbot.pypy.org Sun Mar 2 20:29:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 20:29:15 +0100 (CET) Subject: [pypy-commit] stmgc default: Add passing tests Message-ID: <20140302192915.AD0941C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r928:90c311bc6901 Date: 2014-03-02 20:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/90c311bc6901/ Log: Add passing tests diff --git a/c7/test/test_prebuilt.py b/c7/test/test_prebuilt.py --- a/c7/test/test_prebuilt.py +++ b/c7/test/test_prebuilt.py @@ -61,3 +61,21 @@ print lp2 assert lp2 != lp1 assert stm_get_ref(lp2, 0) == lp1 + + def test_multiple_calls_to_stm_setup_prebuilt_1(self, reverse=False): + static1 = prebuilt_refs(1) + static2 = prebuilt_refs(1) + ffi.cast("object_t **", static1)[1] = static2 + if not reverse: + lp1 = 
lib.stm_setup_prebuilt(static1) + lp2 = lib.stm_setup_prebuilt(static2) + else: + lp2 = lib.stm_setup_prebuilt(static2) + lp1 = lib.stm_setup_prebuilt(static1) + # + self.start_transaction() + assert stm_get_ref(lp1, 0) == lp2 + assert stm_get_ref(lp2, 0) == ffi.NULL + + def test_multiple_calls_to_stm_setup_prebuilt_2(self): + self.test_multiple_calls_to_stm_setup_prebuilt_1(reverse=True) From noreply at buildbot.pypy.org Sun Mar 2 20:29:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 20:29:16 +0100 (CET) Subject: [pypy-commit] stmgc default: Ah, I knew there was a bug. Message-ID: <20140302192916.C3AB51C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r929:d6bde44bb839 Date: 2014-03-02 20:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/d6bde44bb839/ Log: Ah, I knew there was a bug. diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -35,6 +35,10 @@ char *realnobj = REAL_ADDRESS(stm_object_pages, nobj); memcpy(realnobj, (char *)objaddr, size); + /* Fix the flags in the copied object, asserting that it was zero so far */ + assert(nobj->stm_flags == 0); + nobj->stm_flags = GCFLAG_WRITE_BARRIER; + /* Mark the original object */ pforwarded_array[0] = GCWORD_PREBUILT_MOVED; pforwarded_array[1] = nobj; diff --git a/c7/test/test_prebuilt.py b/c7/test/test_prebuilt.py --- a/c7/test/test_prebuilt.py +++ b/c7/test/test_prebuilt.py @@ -61,6 +61,8 @@ print lp2 assert lp2 != lp1 assert stm_get_ref(lp2, 0) == lp1 + assert lib._stm_get_flags(lp1) == lib._STM_GCFLAG_WRITE_BARRIER + assert lib._stm_get_flags(lp2) == lib._STM_GCFLAG_WRITE_BARRIER def test_multiple_calls_to_stm_setup_prebuilt_1(self, reverse=False): static1 = prebuilt_refs(1) @@ -76,6 +78,8 @@ self.start_transaction() assert stm_get_ref(lp1, 0) == lp2 assert stm_get_ref(lp2, 0) == ffi.NULL + assert lib._stm_get_flags(lp1) == lib._STM_GCFLAG_WRITE_BARRIER + assert lib._stm_get_flags(lp2) == lib._STM_GCFLAG_WRITE_BARRIER def test_multiple_calls_to_stm_setup_prebuilt_2(self): self.test_multiple_calls_to_stm_setup_prebuilt_1(reverse=True) From noreply at buildbot.pypy.org Sun Mar 2 20:29:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 20:29:17 +0100 (CET) Subject: [pypy-commit] stmgc default: Shrink the init_prebuilt_xxx functions by using stm_setup_prebuilt(). Yay. Message-ID: <20140302192917.CF61E1C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r930:8852b39a1b63 Date: 2014-03-02 20:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/8852b39a1b63/ Log: Shrink the init_prebuilt_xxx functions by using stm_setup_prebuilt(). Yay. 
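The diff below leans on the stm_setup_prebuilt() contract introduced earlier in this series: hand it the address of a plain static C structure and it returns the GC-managed copy, recursively tracing and converting everything reachable from it. A minimal application-side sketch of that idiom, for illustration only (the struct, field and function names are invented for this sketch, and the callbacks are reduced to a single fixed-size type, which a real program would replace with per-type dispatch):

    #include "stmgc.h"            /* the c7 API shown earlier in this series */

    struct node_s {               /* hypothetical application type */
        struct object_s hdr;      /* stm header; left zeroed in the statics */
        struct node_s *next;
        long value;
    };

    /* the two callbacks stmgc expects from the application */
    ssize_t stmcb_size_rounded_up(struct object_s *obj)
    {
        return sizeof(struct node_s);
    }
    void stmcb_trace(struct object_s *obj, void visit(object_t **))
    {
        struct node_s *n = (struct node_s *)obj;
        visit((object_t **)&n->next);
    }

    static struct node_s pb_tail = { .value = 2 };
    static struct node_s pb_head = { .next = &pb_tail, .value = 1 };

    struct node_s *install_prebuilt(void)   /* typically called once, after stm_setup() */
    {
        /* copies both statics into the GC pages, fixes the 'next' pointer,
           and returns the GC-managed copy of pb_head; the static originals
           are left behind as garbage and must not be used afterwards */
        return (struct node_s *)stm_setup_prebuilt((object_t *)&pb_head);
    }

duhton's INIT_PREBUILT(p) macro in the patch is just a typed wrapper around the same stm_setup_prebuilt() call, which is why each init_prebuilt_xxx() collapses to a static initializer plus one conversion.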
diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -186,6 +186,8 @@ #define _du_read1(p1) stm_read((object_t *)(p1)) #define _du_write1(p1) stm_write((object_t *)(p1)) +#define INIT_PREBUILT(p) ((typeof(p))stm_setup_prebuilt((object_t *)(p))) + #ifndef NDEBUG # define _check_not_free(ob) \ diff --git a/duhton/frame.c b/duhton/frame.c --- a/duhton/frame.c +++ b/duhton/frame.c @@ -47,18 +47,12 @@ void init_prebuilt_frame_objects(void) { - du_empty_framenode = (DuFrameNodeObject *) - _stm_allocate_old(sizeof(DuFrameNodeObject)); - du_empty_framenode->ob_base.type_id = DUTYPE_FRAMENODE; - du_empty_framenode->ob_count = 0; + static DuFrameNodeObject empty_framenode = { {.type_id=DUTYPE_FRAMENODE} }; + static DuFrameObject g = { {.type_id=DUTYPE_FRAME}, + .ob_nodes=&empty_framenode }; - DuFrameObject *g = (DuFrameObject *) - _stm_allocate_old(sizeof(DuFrameObject)); - g->ob_base.type_id = DUTYPE_FRAME; - g->ob_nodes = du_empty_framenode; - Du_Globals = (DuObject *)g; - - _du_save2(du_empty_framenode, Du_Globals); + du_empty_framenode = INIT_PREBUILT(&empty_framenode); + Du_Globals = (DuObject *)INIT_PREBUILT(&g); } DuObject *DuFrame_New() diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -205,12 +205,8 @@ void init_prebuilt_list_objects(void) { - du_empty_tuple = (DuTupleObject *) - _stm_allocate_old(sizeof(DuTupleObject)); - du_empty_tuple->ob_base.type_id = DUTYPE_TUPLE; - du_empty_tuple->ob_count = 0; - du_empty_tuple->ob_capacity = 0; - _du_save1(du_empty_tuple); + static DuTupleObject empty_tuple = { { { }, DUTYPE_TUPLE } }; + du_empty_tuple = INIT_PREBUILT(&empty_tuple); } DuObject *DuList_New() diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -69,9 +69,8 @@ void init_prebuilt_object_objects(void) { - Du_None = (DuObject *)_stm_allocate_old(sizeof(DuObject)); - Du_None->type_id = DUTYPE_NONE; - _du_save1(Du_None); + static DuObject none = { { }, DUTYPE_NONE }; + Du_None = INIT_PREBUILT(&none); } void Du_FatalError(char *msg, ...) 
diff --git a/duhton/symbol.c b/duhton/symbol.c --- a/duhton/symbol.c +++ b/duhton/symbol.c @@ -55,13 +55,8 @@ void init_prebuilt_symbol_objects(void) { - _Du_AllSymbols = (DuSymbolObject *) - _stm_allocate_old(sizeof(DuSymbolObject)); - _Du_AllSymbols->ob_base.type_id = DUTYPE_SYMBOL; - _Du_AllSymbols->myid = 0; - _Du_AllSymbols->name = ""; - _Du_AllSymbols->next = NULL; - _du_save1(_Du_AllSymbols); + static DuSymbolObject allsymbols = { {.type_id=DUTYPE_SYMBOL}, .name="" }; + _Du_AllSymbols = INIT_PREBUILT(&allsymbols); } DuObject *DuSymbol_FromString(const char *name) diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -7,15 +7,11 @@ void init_prebuilt_transaction_objects(void) { + static DuConsObject pending = { {.type_id = DUTYPE_CONS} }; + du_pending_transactions = INIT_PREBUILT(&pending); + assert(Du_None); /* already created */ - - du_pending_transactions = (DuConsObject *) - _stm_allocate_old(sizeof(DuConsObject)); - du_pending_transactions->ob_base.type_id = DUTYPE_CONS; - du_pending_transactions->car = NULL; du_pending_transactions->cdr = Du_None; - - _du_save1(du_pending_transactions); }; static pthread_mutex_t mutex_sleep = PTHREAD_MUTEX_INITIALIZER; From noreply at buildbot.pypy.org Sun Mar 2 21:59:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Mar 2014 21:59:41 +0100 (CET) Subject: [pypy-commit] stmgc default: Update TODO Message-ID: <20140302205941.908571C0865@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r931:4e56dcb88db9 Date: 2014-03-02 21:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/4e56dcb88db9/ Log: Update TODO diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -3,4 +3,6 @@ - write barrier for big arrays -- prebuilt objects: stm_setup_prebuilt(array_of_"object_t*", length); +- hash, id +- weakrefs +- finalizers From noreply at buildbot.pypy.org Mon Mar 3 01:22:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 01:22:20 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: test this value as well Message-ID: <20140303002220.72AEA1C0865@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69623:3fbce3747a08 Date: 2014-03-02 19:12 -0500 http://bitbucket.org/pypy/pypy/changeset/3fbce3747a08/ Log: test this value as well diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -541,6 +541,9 @@ y = Y() y.a = 10 assert y.a == 10 + val = (1 << (s[1] - 1)) | 1 + y.a = val + assert y.a == val y.free() def test_invalid_bitfields(self): From noreply at buildbot.pypy.org Mon Mar 3 02:05:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 02:05:30 +0100 (CET) Subject: [pypy-commit] pypy default: update cffi's _backend_test_c.py Message-ID: <20140303010530.2912A1D287A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69624:1b624a48fffc Date: 2014-03-02 20:04 -0500 http://bitbucket.org/pypy/pypy/changeset/1b624a48fffc/ Log: update cffi's _backend_test_c.py diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -359,6 +359,9 @@ assert x.load_function(BVoidP, 'strcpy') py.test.raises(KeyError, x.load_function, BVoidP, 'xxx_this_function_does_not_exist') + # the next 
one is from 'libm', not 'libc', but we assume + # that it is already loaded too, so it should work + assert x.load_function(BVoidP, 'sqrt') def test_hash_differences(): BChar = new_primitive_type("char") From noreply at buildbot.pypy.org Mon Mar 3 02:05:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 02:05:53 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix whatsnew Message-ID: <20140303010553.7F7EE1D287A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69625:ffd14bc398d2 Date: 2014-03-02 19:56 -0500 http://bitbucket.org/pypy/pypy/changeset/ffd14bc398d2/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -94,3 +94,6 @@ .. branch: test-58c3d8552833 Fix for getarrayitem_gc_pure optimization + +.. branch: stdlib-2.7.5 +.. branch: vendor/stdlib From noreply at buildbot.pypy.org Mon Mar 3 02:05:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 02:05:55 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: merge default Message-ID: <20140303010555.506481D287A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69626:81b36fff2b15 Date: 2014-03-02 20:05 -0500 http://bitbucket.org/pypy/pypy/changeset/81b36fff2b15/ Log: merge default diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -897,11 +897,13 @@ if (c_api_object == NULL) return; if (!PyCapsule_CheckExact(c_api_object)) { + Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); return; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + Py_DECREF(c_api_object); } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -315,27 +315,27 @@ .. _`trace example`: -Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +Tracing bytecodes ++++++++++++++++++ -You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +You can use a simple tracing mode to monitor the interpretation of +bytecodes. 
To enable it, set ``__pytrace__ = 1`` on the interactive +PyPy console:: >>>> __pytrace__ = 1 Tracing enabled - >>>> a = 1 + 2 - |- <<<< enter a = 1 + 2 @ 1 >>>> - |- 0 LOAD_CONST 0 (W_IntObject(1)) - |- 3 LOAD_CONST 1 (W_IntObject(2)) - |- 6 BINARY_ADD - |- add(W_IntObject(1), W_IntObject(2)) -> W_IntObject(3) - |- 7 STORE_NAME 0 (a) - |- hash(W_StringObject('a')) -> W_IntObject(-468864544) - |- int_w(W_IntObject(-468864544)) -> -468864544 - |-10 LOAD_CONST 2 () - |-13 RETURN_VALUE - |- <<<< leave a = 1 + 2 @ 1 >>>> + >>>> x = 5 + : LOAD_CONST 0 (5) + : STORE_NAME 0 (x) + : LOAD_CONST 1 (None) + : RETURN_VALUE 0 + >>>> x + : LOAD_NAME 0 (x) + : PRINT_EXPR 0 + 5 + : LOAD_CONST 0 (None) + : RETURN_VALUE 0 + >>>> Demos ------- diff --git a/pypy/interpreter/interactive.py b/pypy/interpreter/interactive.py --- a/pypy/interpreter/interactive.py +++ b/pypy/interpreter/interactive.py @@ -189,8 +189,7 @@ try: code.exec_code(self.space, self.w_globals, self.w_globals) finally: - if self.tracelevel: - self.space.unsettrace() + self.unsettrace() self.checktrace() # run doit() in an exception-catching box @@ -203,7 +202,38 @@ def settrace(self): if self.tracelevel: - self.space.settrace() + ec = self.space.getexecutioncontext() + if not hasattr(self, '_orig_bytecode_only_trace'): + self._orig_bytecode_only_trace = ec.bytecode_only_trace + ec.bytecode_only_trace = self._do_bytecode_only_trace + + def unsettrace(self): + if self.tracelevel: + ec = self.space.getexecutioncontext() + ec.bytecode_only_trace = self._orig_bytecode_only_trace + + def _do_bytecode_only_trace(self, frame): + from pypy.tool.pydis import Bytecode, HAVE_ARGUMENT + + if frame.hide(): + return + + self.unsettrace() + next_instr = frame.last_instr + opcode = ord(frame.pycode.co_code[next_instr]) + + oparg = 0 + if opcode >= HAVE_ARGUMENT: + lo = ord(frame.pycode.co_code[next_instr+1]) + hi = ord(frame.pycode.co_code[next_instr+2]) + oparg = (hi * 256) | lo + + class fake: + code = frame.pycode + bytecode = Bytecode(fake, next_instr, oparg, 0) + print '\t%-19s %s' % (str(frame.pycode.co_name) + ':', + bytecode.repr_with_space(self.space)) + self.settrace() def checktrace(self): s = self.space @@ -213,11 +243,11 @@ s.wrap("__pytrace__"))) if self.tracelevel > 0 and tracelevel == 0: - s.reset_trace() + self.unsettrace() print "Tracing disabled" if self.tracelevel == 0 and tracelevel > 0: - self.space.unsettrace() + self.unsettrace() print "Tracing enabled" self.tracelevel = tracelevel diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -7,10 +7,13 @@ pypypath = py.path.local(pypy.__file__).dirpath("bin", "pyinteractive.py") -def run(*args): +def run(*args, **kwds): + stdin = kwds.pop('stdin', '') + assert not kwds argslist = map(str, args) - popen = subprocess.Popen(argslist, stdout=subprocess.PIPE) - stdout, stderr = popen.communicate() + popen = subprocess.Popen(argslist, stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + stdout, stderr = popen.communicate(stdin) return stdout @@ -99,3 +102,19 @@ stderr=subprocess.PIPE) _, stderr = popen.communicate() assert stderr.endswith('KeyError: \n') + + +def test_pytrace(): + output = run(sys.executable, pypypath, '-S', + stdin="__pytrace__ = 1\nx = 5\nx") + assert ('\t: LOAD_CONST 0 (5)\n' + '\t: STORE_NAME 0 (x)\n' + '\t: LOAD_CONST 1 (None)\n' + '\t: RETURN_VALUE 0 \n' + '>>>> ') in output + assert ('\t: LOAD_NAME 0 (x)\n' + '\t: PRINT_EXPR 0 \n' + # '5\n' --- this line sent to 
stderr + '\t: LOAD_CONST 0 (None)\n' + '\t: RETURN_VALUE 0 \n' + '>>>> ') in output diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -359,6 +359,9 @@ assert x.load_function(BVoidP, 'strcpy') py.test.raises(KeyError, x.load_function, BVoidP, 'xxx_this_function_does_not_exist') + # the next one is from 'libm', not 'libc', but we assume + # that it is already loaded too, so it should work + assert x.load_function(BVoidP, 'sqrt') def test_hash_differences(): BChar = new_primitive_type("char") diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -35,6 +35,12 @@ def getvalue(self): return self._value +lib_m = 'm' +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' class TestFunction(object): Backend = CTypesBackend @@ -44,18 +50,16 @@ ffi.cdef(""" double sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x == math.sin(1.23) def test_sinf(self): - if sys.platform == 'win32': - py.test.skip("no 'sinf'") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sinf(1.23) assert type(x) is float assert x != math.sin(1.23) # rounding effects @@ -67,14 +71,14 @@ ffi.cdef(""" void sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x is None def test_dlopen_filename(self): - path = ctypes.util.find_library("m") + path = ctypes.util.find_library(lib_m) if not path: - py.test.skip("libm not found") + py.test.skip("%s not found" % lib_m) ffi = FFI(backend=self.Backend()) ffi.cdef(""" double cos(double x); @@ -92,7 +96,7 @@ ffi.cdef(""" double cos(double x); """) - m = ffi.dlopen("m", ffi.RTLD_LAZY | ffi.RTLD_LOCAL) + m = ffi.dlopen(lib_m, ffi.RTLD_LAZY | ffi.RTLD_LOCAL) x = m.cos(1.23) assert x == math.cos(1.23) @@ -293,7 +297,7 @@ typedef double func_t(double); func_t sin; """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x == math.sin(1.23) @@ -356,7 +360,7 @@ ffi.cdef(""" int nonexistent(); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) assert not hasattr(m, 'nonexistent') def test_wraps_from_stdlib(self): @@ -370,7 +374,7 @@ def wrapper(*args): return f(*args) + 100 return wrapper - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) sin100 = my_decorator(m.sin) x = sin100(1.23) assert x == math.sin(1.23) + 100 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py @@ -37,7 +37,7 @@ totalsize=-1, totalalignment=-1, sflags=0): assert isinstance(s, FakeStruct) s.fields = fields - + def new_array_type(self, ptrtype, length): return FakeType('' % (ptrtype, length)) @@ -61,7 +61,7 @@ return ', '.join([str(y) + str(x) for x, y, z in self.fields]) class FakeLibrary(object): - + def load_function(self, BType, name): return FakeFunction(BType, name) @@ -71,11 +71,17 @@ self.BType = str(BType) self.name = name +lib_m = "m" +if sys.platform == 'win32': + #there 
is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' def test_simple(): ffi = FFI(backend=FakeBackend()) ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin # should be a callable on real backends assert func.name == 'sin' assert func.BType == '), , False>' @@ -149,7 +155,7 @@ x, double/*several*//*comment*/y) /*on the same line*/ ; """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin assert func.name == 'sin' assert func.BType == ', ), , False>' diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py b/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_unicode_literals.py @@ -11,6 +11,13 @@ import sys, math from cffi import FFI +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' + def test_cast(): ffi = FFI() @@ -56,7 +63,7 @@ def test_dlopen(): ffi = FFI() ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") # unicode literal + m = ffi.dlopen(lib_m) # unicode literal x = m.sin(1.23) assert x == math.sin(1.23) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -5,7 +5,12 @@ from pypy.module.test_lib_pypy.cffi_tests.support import * +lib_m = ['m'] if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = ['msvcrt'] pass # no obvious -Werror equivalent on MSVC else: if (sys.platform == 'darwin' and @@ -64,13 +69,13 @@ def test_simple_case(): ffi = FFI() ffi.cdef("double sin(double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) def test_rounding_1(): ffi = FFI() ffi.cdef("float sin(double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -78,7 +83,7 @@ def test_rounding_2(): ffi = FFI() ffi.cdef("double sin(float x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -104,7 +109,7 @@ def test_longdouble(): ffi = FFI() ffi.cdef("long double sinl(long double x);") - lib = ffi.verify('#include ', libraries=["m"]) + lib = ffi.verify('#include ', libraries=lib_m) for input in [1.23, ffi.cast("double", 1.23), ffi.cast("long double", 1.23)]: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py @@ -8,6 +8,13 @@ class DistUtilsTest(object): + def setup_class(self): + self.lib_m = "m" + if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import 
distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + self.lib_m = 'msvcrt' def test_locate_engine_class(self): cls = _locate_engine_class(FFI(), self.generic) @@ -27,7 +34,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.write_source() with open(v.sourcefilename, 'r') as f: data = f.read() @@ -38,7 +45,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.sourcefilename = filename = str(udir.join('write_source.c')) v.write_source() assert filename == v.sourcefilename @@ -51,7 +58,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) try: from StringIO import StringIO except ImportError: @@ -65,7 +72,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) v.compile_module() assert v.get_module_name().startswith('_cffi_') if v.generates_python_module(): @@ -77,7 +84,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!2*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) basename = self.__class__.__name__ + 'test_compile_module' v.modulefilename = filename = str(udir.join(basename + '.so')) v.compile_module() @@ -94,7 +101,7 @@ ffi.cdef("%s sin(double x);" % csrc) v = Verifier(ffi, "#include ", force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) names.append(v.get_module_name()) assert names[0] == names[1] != names[2] @@ -112,7 +119,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there %s!3*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -123,7 +130,7 @@ udir.join('test_verifier_args.h').write('#include \n') v = Verifier(ffi, csrc, include_dirs=[str(udir)], force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -132,7 +139,7 @@ ffi.cdef("double sin(double x);") csrc = "/*6%s*/\n#include " % self lib = ffi.verify(csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) assert isinstance(ffi.verifier, Verifier) with open(ffi.verifier.sourcefilename, 'r') as f: @@ -150,7 +157,7 @@ ''' lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')], force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) v = ffi.verifier ext = v.get_extension() @@ -164,7 +171,7 @@ ffi.cdef("double sin(double x);") csrc = '/*hi there9!%s*/\n#include \n' % self v = Verifier(ffi, csrc, force_generic_engine=self.generic, - libraries=["m"]) + libraries=[self.lib_m]) assert not os.path.exists(v.sourcefilename) v.get_extension() assert os.path.exists(v.sourcefilename) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ 
b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -271,7 +271,13 @@ def optimize_GUARD_VALUE(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): - raise InvalidLoop('A promote of a virtual (a recently allocated object) never makes sense!') + arg = value.get_constant_class(self.optimizer.cpu) + if arg: + addr = arg.getaddr() + name = self.optimizer.metainterp_sd.get_name_from_address(addr) + else: + name = "" + raise InvalidLoop('A promote of a virtual %s (a recently allocated object) never makes sense!' % name) if value.last_guard: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value, which is rather silly. diff --git a/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py b/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py --- a/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py @@ -35,8 +35,8 @@ def raises(self, e, fn, *args): try: fn(*args) - except e: - pass + except Exception, e: + return e opt = allopts[optnum] exec "TestNo%sLLtype = TestLLtype" % (opt[0].upper() + opt[1:]) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -91,7 +91,7 @@ return loop def raises(self, e, fn, *args): - py.test.raises(e, fn, *args) + return py.test.raises(e, fn, *args).value class OptimizeOptTest(BaseTestWithUnroll): @@ -2824,8 +2824,10 @@ guard_value(p2, ConstPtr(myptr)) [] jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + exc = self.raises(InvalidLoop, self.optimize_loop, + ops, "crash!") + if exc: + assert "node" in exc.msg def test_merge_guard_class_guard_value(self): ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -321,6 +321,13 @@ max_retrace_guards = 15 jitcounter = DeterministicJitCounter() + def get_name_from_address(self, addr): + # hack + try: + return "".join(addr.ptr.name)[:-1] # remove \x00 + except AttributeError: + return "" + class Storage(compile.ResumeGuardDescr): "for tests." def __init__(self, metainterp_sd=None, original_greenkey=None): From noreply at buildbot.pypy.org Mon Mar 3 03:25:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 03:25:31 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix pwd on 32bit? Message-ID: <20140303022531.AA55C1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69627:c212634712e1 Date: 2014-03-02 21:24 -0500 http://bitbucket.org/pypy/pypy/changeset/c212634712e1/ Log: fix pwd on 32bit? 
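The interp_pwd.py hunk below rejects arguments that do not fit the platform's uid_t by round-tripping the value through the cast and comparing it with the original, while still accepting -1 as the conventional marker that is simply cast to uid_t. The same check written as plain C, for illustration only (uid_from_long and the sample values in main are invented for this sketch):

    #include <stdio.h>
    #include <sys/types.h>

    /* returns 0 and stores the converted uid, or -1 if 'val' would not
       survive the conversion; mirrors rffi.cast(uid_t, val) + widen() */
    static int uid_from_long(long long val, uid_t *result)
    {
        uid_t uid = (uid_t)val;
        if (val != -1 &&
            (val < 0 || (unsigned long long)uid != (unsigned long long)val))
            return -1;
        *result = uid;
        return 0;
    }

    int main(void)
    {
        uid_t u;
        printf("%d\n", uid_from_long(1234, &u));      /* 0: fits */
        printf("%d\n", uid_from_long(-1, &u));        /* 0: -1 passes through */
        printf("%d\n", uid_from_long(1LL << 40, &u)); /* -1 where uid_t is 32-bit */
        return 0;
    }

In the RPython hunk the resulting OverflowError is then converted into the same "uid not found" KeyError, which is what the accompanying test_pwd.py changes check for out-of-range values.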
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -372,7 +372,6 @@ return OpErrFmt, strings class OpErrFmtNoArgs(OperationError): - def __init__(self, w_type, value): self._value = value self.setup(w_type) diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -3,7 +3,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from rpython.rlib.rarithmetic import intmask, most_pos_value_of, widen eci = ExternalCompilationInfo(includes=['pwd.h']) @@ -51,8 +50,8 @@ w_tuple = space.newtuple([ space.wrap(rffi.charp2str(pw.c_pw_name)), space.wrap(rffi.charp2str(pw.c_pw_passwd)), - space.wrap(intmask(pw.c_pw_uid)), - space.wrap(intmask(pw.c_pw_gid)), + space.wrap(pw.c_pw_uid), + space.wrap(pw.c_pw_gid), space.wrap(rffi.charp2str(pw.c_pw_gecos)), space.wrap(rffi.charp2str(pw.c_pw_dir)), space.wrap(rffi.charp2str(pw.c_pw_shell)), @@ -67,18 +66,22 @@ Return the password database entry for the given numeric user ID. See pwd.__doc__ for more on password database entries. """ + msg = "getpwuid(): uid not found" try: - uid = space.int_w(w_uid) - if uid < -1 or uid > widen(most_pos_value_of(uid_t)): + val = space.int_w(w_uid) + uid = rffi.cast(uid_t, val) + if val == -1: + pass + elif val < 0 or uid != val: raise OperationError(space.w_OverflowError, None) except OperationError, e: if e.match(space, space.w_OverflowError): - raise oefmt(space.w_KeyError, "getpwuid(): uid not found") + raise oefmt(space.w_KeyError, msg) raise - uid = rffi.cast(uid_t, uid) pw = c_getpwuid(uid) if not pw: - raise oefmt(space.w_KeyError, "getpwuid(): uid not found: %d", widen(uid)) + raise OperationError(space.w_KeyError, space.wrap( + "%s: %d" % (msg, uid))) return make_struct_passwd(space, pw) diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -28,9 +28,13 @@ # -1 is allowed, cast to uid_t exc = raises(KeyError, pwd.getpwuid, -1) m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) - assert m + assert m, exc.value[0] maxval = int(m.group(1)) assert maxval >= 2**32 - 1 + # shouldn't overflow + exc = raises(KeyError, pwd.getpwuid, maxval) + m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) + assert m, exc.value[0] # should be out of uid_t range for v in [-2, maxval+1, 2**128, -2**128]: exc = raises(KeyError, pwd.getpwuid, v) From noreply at buildbot.pypy.org Mon Mar 3 03:25:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 03:25:33 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: cleanup Message-ID: <20140303022533.077D91C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69628:312589f4d6e7 Date: 2014-03-02 21:18 -0500 http://bitbucket.org/pypy/pypy/changeset/312589f4d6e7/ Log: cleanup diff --git a/pypy/module/_sre/test/support_test_app_sre.py b/pypy/module/_sre/test/support_test_app_sre.py --- a/pypy/module/_sre/test/support_test_app_sre.py +++ b/pypy/module/_sre/test/support_test_app_sre.py @@ -1,6 +1,6 @@ """Support functions for app-level _sre tests.""" import locale, _sre -from sre_constants import OPCODES, ATCODES, CHCODES +from sre_constants import OPCODES, ATCODES, CHCODES, MAXREPEAT def encode_literal(string): opcodes = [] diff 
--git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -1,12 +1,15 @@ """Regular expression tests specific to _sre.py and accumulated during TDD.""" + import os import py from py.test import raises, skip from pypy.interpreter.gateway import app2interp_temp + def init_app_test(cls, space): - cls.w_s = space.appexec([space.wrap(os.path.realpath(os.path.dirname(__file__)))], - """(this_dir): + cls.w_s = space.appexec( + [space.wrap(os.path.realpath(os.path.dirname(__file__)))], + """(this_dir): import sys # Uh-oh, ugly hack sys.path.insert(0, this_dir) @@ -15,11 +18,10 @@ return support_test_app_sre finally: sys.path.pop(0) - """) + """) class AppTestSrePy: - def test_magic(self): import _sre, sre_constants assert sre_constants.MAGIC == _sre.MAGIC @@ -30,7 +32,6 @@ class AppTestSrePattern: - def test_copy(self): # copy support is disabled by default in _sre.c import re @@ -94,7 +95,7 @@ class AppTestSreMatch: spaceconfig = dict(usemodules=('array', )) - + def test_copy(self): import re # copy support is disabled by default in _sre.c @@ -242,6 +243,10 @@ assert u2 == u1 assert type(u2) is unicode # and not MyUnicode + def test_sub_bug(self): + import re + assert re.sub('=\w{2}', 'x', '=CA') == 'x' + def test_match_array(self): import re, array a = array.array('c', 'hello') @@ -294,7 +299,6 @@ class AppTestSreScanner: - def test_scanner_attributes(self): import re p = re.compile("bla") @@ -346,7 +350,7 @@ def setup_method(self, method): import locale locale.setlocale(locale.LC_ALL, (None, None)) - + def teardown_method(self, method): import locale locale.setlocale(locale.LC_ALL, (None, None)) @@ -382,10 +386,9 @@ s.assert_lower_equal([("a", "a"), ("A", "a"), (UPPER_AE, LOWER_AE), (u"\u00c4", u"\u00e4"), (UPPER_PI, LOWER_PI), (u"\u4444", u"\u4444")], sre_constants.SRE_FLAG_UNICODE) - + class AppTestSimpleSearches: - def test_search_simple_literal(self): import re assert re.search("bla", "bla") @@ -556,16 +559,8 @@ assert re.search(r"b(? Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69629:8903d4b0e55e Date: 2014-03-02 21:13 -0500 http://bitbucket.org/pypy/pypy/changeset/8903d4b0e55e/ Log: add sre overflow test diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -32,6 +32,10 @@ class AppTestSrePattern: + def setup_class(cls): + # This imports support_test_sre as the global "s" + init_app_test(cls, cls.space) + def test_copy(self): # copy support is disabled by default in _sre.c import re @@ -48,6 +52,16 @@ assert 2 == p.groups assert {"g": 2} == p.groupindex + def test_repeat_minmax_overflow(self): + import re + string = "x" * 100000 + assert re.match(r".{%d}" % (self.s.MAXREPEAT - 1), string) is None + assert re.match(r".{,%d}" % (self.s.MAXREPEAT - 1), string).span() == (0, 100000) + assert re.match(r".{%d,}?" % (self.s.MAXREPEAT - 1), string) is None + raises(OverflowError, re.compile, r".{%d}" % self.s.MAXREPEAT) + raises(OverflowError, re.compile, r".{,%d}" % self.s.MAXREPEAT) + raises(OverflowError, re.compile, r".{%d,}?" % self.s.MAXREPEAT) + def test_match_none(self): import re p = re.compile("bla") From noreply at buildbot.pypy.org Mon Mar 3 04:33:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 04:33:18 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: now, fix pwd translation on 64bit? 
Message-ID: <20140303033318.13B5F1C3373@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69630:510ebdd4604e Date: 2014-03-02 22:32 -0500 http://bitbucket.org/pypy/pypy/changeset/510ebdd4604e/ Log: now, fix pwd translation on 64bit? diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -3,6 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec +from rpython.rlib.rarithmetic import widen eci = ExternalCompilationInfo(includes=['pwd.h']) @@ -69,7 +70,7 @@ msg = "getpwuid(): uid not found" try: val = space.int_w(w_uid) - uid = rffi.cast(uid_t, val) + uid = widen(rffi.cast(uid_t, val)) if val == -1: pass elif val < 0 or uid != val: From noreply at buildbot.pypy.org Mon Mar 3 05:57:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 05:57:48 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix cpyext test_sre Message-ID: <20140303045749.03F7D1C0865@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69631:b0cdbb4c9947 Date: 2014-03-02 23:56 -0500 http://bitbucket.org/pypy/pypy/changeset/b0cdbb4c9947/ Log: fix cpyext test_sre diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -180,12 +180,16 @@ del x, y def test_sre(self): + import sys + for m in ['_sre', 'sre_compile', 'sre_constants', 'sre_parse', 're']: + # clear out these modules + try: + del sys.modules[m] + except KeyError: + pass module = self.import_module(name='_sre') - import sre_compile - sre_compile._sre = module - assert sre_compile.MAGIC == module.MAGIC import re - import time + assert re.sre_compile._sre is module s = u"Foo " * 1000 + u"Bar" prog = re.compile(ur"Foo.*Bar") assert prog.match(s) From noreply at buildbot.pypy.org Mon Mar 3 06:50:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 06:50:12 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: more fixes for zlib Message-ID: <20140303055012.745BA1D23D1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69632:00860b5ad1a8 Date: 2014-03-03 00:48 -0500 http://bitbucket.org/pypy/pypy/changeset/00860b5ad1a8/ Log: more fixes for zlib diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, interp_attrproperty -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import keepalive_until_here @@ -87,7 +87,7 @@ return space.wrap(result) - at unwrap_spec(string='bufferstr', wbits=int, bufsize=int) + at unwrap_spec(string='bufferstr', wbits="c_int", bufsize=int) def decompress(space, string, wbits=rzlib.MAX_WBITS, bufsize=0): """ decompress(string[, wbits[, bufsize]]) -- Return decompressed string. 
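A brief aside on the wbits change in the hunk above (the remaining hunks of this diff follow below): declaring wbits as "c_int" makes out-of-range values fail loudly instead of being silently truncated. A minimal sketch of the intended behaviour, assuming a CPython-2.7-compatible zlib module, a platform with a 32-bit C int, and nothing beyond the standard library; this snippet is an illustration, not code from the commit:

    import zlib

    data = zlib.compress('spam' * 100)
    # a wbits value that fits in a C int: normal decompression
    assert zlib.decompress(data, 15) == 'spam' * 100
    # a wbits value outside the C int range: rejected up front
    try:
        zlib.decompress(data, 2 ** 31)
    except OverflowError, e:
        print 'rejected:', e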
@@ -116,7 +116,6 @@ stream = rzlib.null_stream def __init__(self, space): - self.space = space self._lock = space.allocate_lock() def lock(self): @@ -146,7 +145,7 @@ self.stream = rzlib.deflateInit(level, method, wbits, memLevel, strategy) except rzlib.RZlibError, e: - raise zlib_error(self.space, e.msg) + raise zlib_error(space, e.msg) except ValueError: raise OperationError(space.w_ValueError, space.wrap("Invalid initialization option")) @@ -158,7 +157,7 @@ self.stream = rzlib.null_stream @unwrap_spec(data='bufferstr') - def compress(self, data): + def compress(self, space, data): """ compress(data) -- Return a string containing data compressed. @@ -171,17 +170,17 @@ self.lock() try: if not self.stream: - raise zlib_error(self.space, + raise zlib_error(space, "compressor object already flushed") result = rzlib.compress(self.stream, data) finally: self.unlock() except rzlib.RZlibError, e: - raise zlib_error(self.space, e.msg) - return self.space.wrap(result) + raise zlib_error(space, e.msg) + return space.wrap(result) @unwrap_spec(mode="c_int") - def flush(self, mode=rzlib.Z_FINISH): + def flush(self, space, mode=rzlib.Z_FINISH): """ flush( [mode] ) -- Return a string containing any remaining compressed data. @@ -197,7 +196,7 @@ self.lock() try: if not self.stream: - raise zlib_error(self.space, + raise zlib_error(space, "compressor object already flushed") result = rzlib.compress(self.stream, '', mode) if mode == rzlib.Z_FINISH: # release the data structures now @@ -206,8 +205,8 @@ finally: self.unlock() except rzlib.RZlibError, e: - raise zlib_error(self.space, e.msg) - return self.space.wrap(result) + raise zlib_error(space, e.msg) + return space.wrap(result) @unwrap_spec(level=int, method=int, wbits=int, memLevel=int, strategy=int) @@ -257,7 +256,7 @@ try: self.stream = rzlib.inflateInit(wbits) except rzlib.RZlibError, e: - raise zlib_error(self.space, e.msg) + raise zlib_error(space, e.msg) except ValueError: raise OperationError(space.w_ValueError, space.wrap("Invalid initialization option")) @@ -278,8 +277,8 @@ else: self.unconsumed_tail = tail - @unwrap_spec(data='bufferstr', max_length=int) - def decompress(self, data, max_length=0): + @unwrap_spec(data='bufferstr', max_length="c_int") + def decompress(self, space, data, max_length=0): """ decompress(data[, max_length]) -- Return a string containing the decompressed version of the data. @@ -291,9 +290,8 @@ if max_length == 0: max_length = sys.maxint elif max_length < 0: - raise OperationError(self.space.w_ValueError, - self.space.wrap("max_length must be " - "greater than zero")) + raise oefmt(space.w_ValueError, + "max_length must be greater than zero") try: self.lock() try: @@ -301,35 +299,36 @@ finally: self.unlock() except rzlib.RZlibError, e: - raise zlib_error(self.space, e.msg) + raise zlib_error(space, e.msg) string, finished, unused_len = result self._save_unconsumed_input(data, finished, unused_len) - return self.space.wrap(string) + return space.wrap(string) - @unwrap_spec(mode="c_int") - def flush(self, mode=rzlib.Z_FINISH): + def flush(self, space, w_length=None): """ flush( [length] ) -- This is kept for backward compatibility, because each call to decompress() immediately returns as much data as possible. 
""" - if mode == rzlib.Z_NO_FLUSH: - return self.space.wrap("") - + if w_length is not None: + length = space.c_int_w(w_length) + if length <= 0: + raise oefmt(space.w_ValueError, + "length must be greater than zero") data = self.unconsumed_tail try: self.lock() try: - result = rzlib.decompress(self.stream, data, mode) + result = rzlib.decompress(self.stream, data, rzlib.Z_FINISH) finally: self.unlock() - except rzlib.RZlibError, e: - raise zlib_error(self.space, e.msg) - - string, finished, unused_len = result - self._save_unconsumed_input(data, finished, unused_len) - return self.space.wrap(string) + except rzlib.RZlibError: + string = "" + else: + string, finished, unused_len = result + self._save_unconsumed_input(data, finished, unused_len) + return space.wrap(string) @unwrap_spec(wbits=int) diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -33,14 +33,12 @@ cls.w_expanded = cls.space.wrap(expanded) cls.w_compressed = cls.space.wrap(zlib.compress(expanded)) - def test_error(self): """ zlib.error should be an exception class. """ assert issubclass(self.zlib.error, Exception) - def test_crc32(self): """ When called with a string, zlib.crc32 should compute its CRC32 and @@ -162,13 +160,46 @@ raises(self.zlib.error, self.zlib.decompress, self.compressed[:-2]) raises(self.zlib.error, self.zlib.decompress, 'foobar') + def test_bad_arguments(self): + import zlib + raises(ValueError, zlib.decompressobj().flush, 0) + raises(ValueError, zlib.decompressobj().flush, -1) + raises(TypeError, zlib.decompressobj().flush, None) + raises(OverflowError, zlib.decompressobj().flush, 2**31) + raises(ValueError, zlib.decompressobj().decompress, 'abc', -1) + raises(TypeError, zlib.decompressobj().decompress, 'abc', None) + raises(OverflowError, zlib.decompressobj().decompress, 'abc', 2**31) + raises(TypeError, self.zlib.decompress, self.compressed, None) + raises(OverflowError, self.zlib.decompress, self.compressed, 2**31) + + def test_empty_flush(self): + import zlib + co = zlib.compressobj(zlib.Z_BEST_COMPRESSION) + assert co.flush() # Returns a zlib header + dco = zlib.decompressobj() + assert dco.flush() == "" + + def test_decompress_incomplete_stream(self): + import zlib + # This is 'foo', deflated + x = 'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' + # For the record + assert zlib.decompress(x) == 'foo' + raises(zlib.error, zlib.decompress, x[:-5]) + # Omitting the stream end works with decompressor objects + # (see issue #8672). + dco = zlib.decompressobj() + y = dco.decompress(x[:-5]) + y += dco.flush() + assert y == 'foo' + def test_unused_data(self): """ Try to feed too much data to zlib.decompress(). It should show up in the unused_data attribute. 
""" d = self.zlib.decompressobj() - s = d.decompress(self.compressed + 'extrastuff') + s = d.decompress(self.compressed + 'extrastuff', 0) assert s == self.expanded assert d.unused_data == 'extrastuff' assert d.flush() == '' @@ -232,6 +263,6 @@ dco.decompress(data, 1) del data data = self.zlib.compress(input2) - assert dco.flush() == input1[1:] + assert dco.flush(1) == input1[1:] assert dco.unused_data == '' assert dco.unconsumed_tail == '' From noreply at buildbot.pypy.org Mon Mar 3 07:04:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 07:04:26 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix translation for 32bit Message-ID: <20140303060426.E265C1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69633:50a62210462c Date: 2014-03-03 01:03 -0500 http://bitbucket.org/pypy/pypy/changeset/50a62210462c/ Log: fix translation for 32bit diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -75,12 +75,6 @@ # to handle the win64 special case: is_emulated_long = _long_typecode != 'l' -SHRT_MIN = -2**(_get_bitsize('h') - 1) -SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 -INT_MIN = -2**(_get_bitsize('i') - 1) -INT_MAX = 2**(_get_bitsize('i') - 1) - 1 -UINT_MAX = 2**_get_bitsize('i') - 1 - LONG_BIT = _get_long_bit() LONG_MASK = (2**LONG_BIT)-1 LONG_TEST = 2**(LONG_BIT-1) @@ -298,7 +292,6 @@ class base_int(long): """ fake unsigned integer implementation """ - def _widen(self, other, value): """ if one argument is int or long, the other type wins. @@ -544,8 +537,11 @@ # needed for ll_os_stat.time_t_to_FILE_TIME in the 64 bit case r_uint32 = build_int('r_uint32', False, 32) -# needed for ll_time.time_sleep_llimpl -maxint32 = int((1 << 31) -1) +SHRT_MIN = -2**(_get_bitsize('h') - 1) +SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 +INT_MIN = -2**(_get_bitsize('i') - 1) +INT_MAX = 2**(_get_bitsize('i') - 1) - 1 +UINT_MAX = r_uint(2**_get_bitsize('i') - 1) # the 'float' C type diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -9,7 +9,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.extfunc import BaseLazyRegistering, registering, extdef from rpython.rlib import rposix -from rpython.rlib.rarithmetic import intmask, maxint32 +from rpython.rlib.rarithmetic import intmask, UINT_MAX from rpython.translator.tool.cbuild import ExternalCompilationInfo if sys.platform == 'win32': @@ -183,19 +183,17 @@ @registering(time.sleep) def register_time_sleep(self): if sys.platform == 'win32': - MAX = maxint32 Sleep = self.llexternal('Sleep', [rffi.ULONG], lltype.Void) def time_sleep_llimpl(secs): millisecs = secs * 1000.0 - while millisecs > MAX: - Sleep(MAX) - millisecs -= MAX + while millisecs > UINT_MAX: + Sleep(UINT_MAX) + millisecs -= UINT_MAX Sleep(rffi.cast(rffi.ULONG, int(millisecs))) else: c_select = self.llexternal('select', [rffi.INT, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP, self.TIMEVALP], rffi.INT) - def time_sleep_llimpl(secs): void = lltype.nullptr(rffi.VOIDP.TO) t = lltype.malloc(self.TIMEVAL, flavor='raw') From noreply at buildbot.pypy.org Mon Mar 3 07:06:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 07:06:40 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: make sure these aren't longs too Message-ID: <20140303060640.5233A1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: 
stdlib-2.7.6 Changeset: r69634:658201d89b47 Date: 2014-03-03 01:06 -0500 http://bitbucket.org/pypy/pypy/changeset/658201d89b47/ Log: make sure these aren't longs too diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -539,8 +539,8 @@ SHRT_MIN = -2**(_get_bitsize('h') - 1) SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 -INT_MIN = -2**(_get_bitsize('i') - 1) -INT_MAX = 2**(_get_bitsize('i') - 1) - 1 +INT_MIN = int(-2**(_get_bitsize('i') - 1)) +INT_MAX = int(2**(_get_bitsize('i') - 1) - 1) UINT_MAX = r_uint(2**_get_bitsize('i') - 1) # the 'float' C type From noreply at buildbot.pypy.org Mon Mar 3 08:14:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 08:14:15 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: skip this test if platform doesnt have select.poll Message-ID: <20140303071415.B55281C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69635:95d6e2b29471 Date: 2014-03-03 02:13 -0500 http://bitbucket.org/pypy/pypy/changeset/95d6e2b29471/ Log: skip this test if platform doesnt have select.poll diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -215,6 +215,8 @@ def test_poll_arguments(self): import select + if not hasattr(select, 'poll'): + skip("no select.poll() on this platform") pollster = select.poll() pollster.register(1) exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 From noreply at buildbot.pypy.org Mon Mar 3 10:35:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 10:35:47 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: try to fix rawffi on 32bit Message-ID: <20140303093547.08E021C3373@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69636:322db849e374 Date: 2014-03-03 03:20 -0500 http://bitbucket.org/pypy/pypy/changeset/322db849e374/ Log: try to fix rawffi on 32bit diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -5,7 +5,6 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import interp_attrproperty from pypy.interpreter.typedef import TypeDef, GetSetProperty -from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, oefmt from pypy.module._rawffi.interp_rawffi import segfault_exception, _MS_WINDOWS from pypy.module._rawffi.interp_rawffi import W_DataShape, W_DataInstance @@ -17,6 +16,7 @@ from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint +from rpython.rtyper.lltypesystem import lltype, rffi def unpack_fields(space, w_fields): @@ -303,7 +303,7 @@ if numbits: lowbit = LOW_BIT(bitsize) bitmask = BIT_MASK(numbits, ll_t) - value = widen(rffi.cast(ll_t, value)) + value = widen(value) value >>= lowbit value &= bitmask if ll_t is lltype.Bool or signedtype(ll_t._type): diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -269,7 +269,7 @@ elif isinstance(s, SomeBool): values = [False, True] else: - raise annmodel.AnnotatorError("memo call: argument must be a class" + raise 
annmodel.AnnotatorError("memo call: argument must be a class " "or a frozen PBC, got %r" % (s,)) argvalues.append(values) # the list of all possible tuples of arguments to give to the memo function diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -149,8 +149,7 @@ return False r_class = rffi.platform.numbertype_to_rclass[tp] assert issubclass(r_class, base_int) - return r_class.BITS < LONG_BIT or ( - r_class.BITS == LONG_BIT and r_class.SIGNED) + return r_class.BITS < LONG_BIT _should_widen_type._annspecialcase_ = 'specialize:memo' # the replacement for sys.maxint From noreply at buildbot.pypy.org Mon Mar 3 10:35:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 10:35:48 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: clean up fcntl overflow checking Message-ID: <20140303093548.5CDAE1C3373@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69637:646404c7a0b2 Date: 2014-03-03 04:16 -0500 http://bitbucket.org/pypy/pypy/changeset/646404c7a0b2/ Log: clean up fcntl overflow checking diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1546,11 +1546,16 @@ raise OperationError(self.w_TypeError, self.wrap("fileno() returned a non-integer") ) - fd = self.int_w(w_fd) - if fd < 0 or fd > INT_MAX: + try: + fd = self.c_int_w(w_fd) + except OperationError, e: + if e.match(self, self.w_OverflowError): + fd = -1 + else: + raise + if fd < 0: raise oefmt(self.w_ValueError, - "file descriptor cannot be a negative integer (%d)", - fd) + "file descriptor cannot be a negative integer (%d)", fd) return fd def warn(self, w_msg, w_warningcls, stacklevel=2): diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -35,11 +35,16 @@ fcntl.fcntl(F(long(f.fileno())), 1) raises(TypeError, fcntl.fcntl, "foo") raises(TypeError, fcntl.fcntl, f, "foo") - raises(TypeError, fcntl.fcntl, F("foo"), 1) - raises(ValueError, fcntl.fcntl, 2147483647 + 1, 1, 0) - raises(ValueError, fcntl.fcntl, F(2147483647 + 1), 1, 0) - raises(ValueError, fcntl.fcntl, -2147483648 - 1, 1, 0) - raises(ValueError, fcntl.fcntl, F(-2147483648 - 1), 1, 0) + exc = raises(TypeError, fcntl.fcntl, F("foo"), 1) + assert exc.value[0] == 'fileno() returned a non-integer' + exc = raises(ValueError, fcntl.fcntl, 2147483647 + 1, 1, 0) + assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + exc = raises(ValueError, fcntl.fcntl, F(2147483647 + 1), 1, 0) + assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + exc = raises(ValueError, fcntl.fcntl, -2147483648 - 1, 1, 0) + assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + exc = raises(ValueError, fcntl.fcntl, F(-2147483648 - 1), 1, 0) + assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' raises(ValueError, fcntl.fcntl, -1, 1, 0) raises(ValueError, fcntl.fcntl, F(-1), 1, 0) raises(ValueError, fcntl.fcntl, F(long(-1)), 1, 0) From noreply at buildbot.pypy.org Mon Mar 3 10:35:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 10:35:49 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix test_pwd on 32bit Message-ID: <20140303093549.7E6CB1C3373@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 
Changeset: r69638:3d01ddb145ff Date: 2014-03-03 04:32 -0500 http://bitbucket.org/pypy/pypy/changeset/3d01ddb145ff/ Log: fix test_pwd on 32bit diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -51,8 +51,8 @@ w_tuple = space.newtuple([ space.wrap(rffi.charp2str(pw.c_pw_name)), space.wrap(rffi.charp2str(pw.c_pw_passwd)), - space.wrap(pw.c_pw_uid), - space.wrap(pw.c_pw_gid), + space.int(space.wrap(pw.c_pw_uid)), + space.int(space.wrap(pw.c_pw_gid)), space.wrap(rffi.charp2str(pw.c_pw_gecos)), space.wrap(rffi.charp2str(pw.c_pw_dir)), space.wrap(rffi.charp2str(pw.c_pw_shell)), From noreply at buildbot.pypy.org Mon Mar 3 10:36:39 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 3 Mar 2014 10:36:39 +0100 (CET) Subject: [pypy-commit] stmgc default: add a #define to disable re-sharing of pages Message-ID: <20140303093639.42ADB1C3373@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r932:0f2b1adc9862 Date: 2014-03-03 10:37 +0100 http://bitbucket.org/pypy/stmgc/changeset/0f2b1adc9862/ Log: add a #define to disable re-sharing of pages diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -340,7 +340,7 @@ /* first, if we're not seeing segment 0, we must change the flags in flag_page_private[] from PRIVATE_PAGE to REMAPPING_PAGE, which will mean "can't re-share" */ - if (segment_base != stm_object_pages) + if (segment_base != stm_object_pages && RESHARE_PAGES) mark_flag_page_private(obj, segment_base); /* trace into the object (the version from 'segment_base') */ @@ -546,7 +546,8 @@ /* sweeping */ mutex_pages_lock(); - major_reshare_pages(); + if (RESHARE_PAGES) + major_reshare_pages(); sweep_large_objects(); //sweep_uniform_pages(); mutex_pages_unlock(); diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -14,6 +14,10 @@ #define GC_MIN (NB_NURSERY_PAGES * 4096 * 8) #define GC_MAJOR_COLLECT 1.82 +/* re-share pages after major collections (1 or 0) */ +#define RESHARE_PAGES 1 + + static char *uninitialized_page_start; /* within segment 0 */ static char *uninitialized_page_stop; From noreply at buildbot.pypy.org Mon Mar 3 11:01:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 11:01:52 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix (from demo/demo_random): needs to check more directly if we use Message-ID: <20140303100152.9E47D1C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r933:0126f6216527 Date: 2014-03-03 11:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/0126f6216527/ Log: Fix (from demo/demo_random): needs to check more directly if we use 'overflow_number' or not. Otherwise a badly placed major collection can run minor_collect(commit=false) in this transaction. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -331,9 +331,6 @@ assert(STM_PSEGMENT->safe_point == SP_RUNNING); assert(STM_PSEGMENT->running_pthread == pthread_self()); - bool has_any_overflow_object = - (STM_PSEGMENT->objects_pointing_to_nursery != NULL); - minor_collection(/*commit=*/ true); s_mutex_lock(); @@ -363,10 +360,11 @@ push_modified_to_other_segments(); /* update 'overflow_number' if needed */ - if (has_any_overflow_object) { + if (STM_PSEGMENT->overflow_number_has_been_used) { highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; assert(highest_overflow_number != 0); /* XXX else, overflow! 
*/ STM_PSEGMENT->overflow_number = highest_overflow_number; + STM_PSEGMENT->overflow_number_has_been_used = false; } /* send what is hopefully the correct signals */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -97,6 +97,7 @@ transaction is done, but only if we actually overflowed any object; otherwise, no object has got this number. */ uint32_t overflow_number; + bool overflow_number_has_been_used; /* The marker stored in the global 'write_locks' array to mean "this segment has modified this old object". */ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -248,6 +248,11 @@ dprintf(("minor_collection commit=%d\n", (int)commit)); STM_PSEGMENT->minor_collect_will_commit_now = commit; + if (!commit) { + /* 'STM_PSEGMENT->overflow_number' is used now by this collection, + in the sense that it's copied to the overflow objects */ + STM_PSEGMENT->overflow_number_has_been_used = true; + } /* We need this to track the large overflow objects for a future commit. We don't need it if we're committing now. */ From noreply at buildbot.pypy.org Mon Mar 3 11:01:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 11:01:53 +0100 (CET) Subject: [pypy-commit] stmgc default: Turn this to 0 again for now; occasionally segfaults in demo_random. Message-ID: <20140303100153.B0EB01C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r934:034e94678c5b Date: 2014-03-03 11:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/034e94678c5b/ Log: Turn this to 0 again for now; occasionally segfaults in demo_random. diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -15,7 +15,7 @@ #define GC_MAJOR_COLLECT 1.82 /* re-share pages after major collections (1 or 0) */ -#define RESHARE_PAGES 1 +#define RESHARE_PAGES 0 From noreply at buildbot.pypy.org Mon Mar 3 11:10:30 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 3 Mar 2014 11:10:30 +0100 (CET) Subject: [pypy-commit] stmgc default: fix _release of demo_random Message-ID: <20140303101030.265D11C02C1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r935:e50b3c39ad9e Date: 2014-03-03 11:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/e50b3c39ad9e/ Log: fix _release of demo_random diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -186,7 +186,7 @@ /* and the same value at the end: */ nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*)); - assert(n->next == *last_next); + OPT_ASSERT(n->next == *last_next); return n->next; } From noreply at buildbot.pypy.org Mon Mar 3 11:12:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 11:12:07 +0100 (CET) Subject: [pypy-commit] stmgc default: Add a TODO item Message-ID: <20140303101207.D5B7A1C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r936:bdf34df273ec Date: 2014-03-03 11:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/bdf34df273ec/ Log: Add a TODO item diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -6,3 +6,6 @@ - hash, id - weakrefs - finalizers + +- the highest_overflow_number can overflow after 2**30 non-collect-time + minor collections From noreply at buildbot.pypy.org Mon Mar 3 11:20:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 11:20:49 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: another rawffi fix Message-ID: 
<20140303102049.841FA1C1041@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69639:b7d5755c8c60 Date: 2014-03-03 05:00 -0500 http://bitbucket.org/pypy/pypy/changeset/b7d5755c8c60/ Log: another rawffi fix diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -418,12 +418,10 @@ if c in TYPEMAP_PTR_LETTERS: res = func(add_arg, argdesc, rffi.VOIDP) return space.wrap(rffi.cast(lltype.Unsigned, res)) - elif c == 'q' or c == 'Q' or c == 'L' or c == 'c' or c == 'u': - return space.wrap(func(add_arg, argdesc, ll_type)) elif c == 'f' or c == 'd' or c == 'g': return space.wrap(float(func(add_arg, argdesc, ll_type))) else: - return space.wrap(intmask(func(add_arg, argdesc, ll_type))) + return space.wrap(func(add_arg, argdesc, ll_type)) raise OperationError(space.w_TypeError, space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' From noreply at buildbot.pypy.org Mon Mar 3 11:53:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 11:53:42 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix translation for 32bit Message-ID: <20140303105342.141391C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69640:b1c9eee30f4f Date: 2014-03-03 05:47 -0500 http://bitbucket.org/pypy/pypy/changeset/b1c9eee30f4f/ Log: fix translation for 32bit diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -149,7 +149,8 @@ return False r_class = rffi.platform.numbertype_to_rclass[tp] assert issubclass(r_class, base_int) - return r_class.BITS < LONG_BIT + return r_class.BITS < LONG_BIT or ( + r_class.BITS == LONG_BIT and r_class.SIGNED) _should_widen_type._annspecialcase_ = 'specialize:memo' # the replacement for sys.maxint From noreply at buildbot.pypy.org Mon Mar 3 11:55:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 11:55:15 +0100 (CET) Subject: [pypy-commit] stmgc default: The various places that fill with 0xDD bytes should not all use the same Message-ID: <20140303105515.3AECF1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r937:ae9ccd94b4ea Date: 2014-03-03 11:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/ae9ccd94b4ea/ Log: The various places that fill with 0xDD bytes should not all use the same pattern. diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -254,7 +254,7 @@ #ifndef NDEBUG assert(chunk->size >= sizeof(dlist_t)); assert(chunk->size <= (((char *)last_chunk) - (char *)data)); - memset(data, 0xDD, chunk->size); + memset(data, 0xDE, chunk->size); #endif /* try to merge with the following chunk in memory */ diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -36,8 +36,8 @@ NULL accesses land. We mprotect it so that accesses fail. 
*/ mprotect(segment_base, 4096, PROT_NONE); - /* Fill the TLS page (page 1) with 0xDD, for debugging */ - memset(REAL_ADDRESS(segment_base, 4096), 0xDD, 4096); + /* Fill the TLS page (page 1) with 0xDC, for debugging */ + memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); /* Make a "hole" at STM_PSEGMENT */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); From noreply at buildbot.pypy.org Mon Mar 3 12:15:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 12:15:04 +0100 (CET) Subject: [pypy-commit] stmgc default: Useful addition for gdb Message-ID: <20140303111504.8586F1C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r938:d9fa9535eaa1 Date: 2014-03-03 12:14 +0100 http://bitbucket.org/pypy/stmgc/changeset/d9fa9535eaa1/ Log: Useful addition for gdb diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -43,6 +43,11 @@ }; __thread struct thread_data td; +struct thread_data *_get_td(void) +{ + return &td; /* for gdb */ +} + ssize_t stmcb_size_rounded_up(struct object_s *ob) { From noreply at buildbot.pypy.org Mon Mar 3 12:36:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 12:36:43 +0100 (CET) Subject: [pypy-commit] stmgc default: More tweaks Message-ID: <20140303113643.1CD4A1C042F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r939:657df7c4acdb Date: 2014-03-03 12:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/657df7c4acdb/ Log: More tweaks diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -22,10 +22,13 @@ struct node_s { struct object_s hdr; + int sig; long my_size; nodeptr_t next; }; +#define SIGNATURE 0x01234567 + static sem_t done; __thread stm_thread_local_t stm_thread_local; @@ -225,6 +228,7 @@ sizeof(struct node_s) + 4096*70}; size_t size = sizes[get_rand(4)]; p = stm_allocate(size); + ((nodeptr_t)p)->sig = SIGNATURE; ((nodeptr_t)p)->my_size = size; pop_roots(); /* reload_roots not necessary, all are old after start_transaction */ @@ -310,6 +314,8 @@ if (td.steps_left % 8 == 0) fprintf(stdout, "#"); + assert(p == NULL || ((nodeptr_t)p)->sig == SIGNATURE); + p = do_step(p); if (p == (objptr_t)-1) { @@ -351,6 +357,7 @@ stm_start_inevitable_transaction(&stm_thread_local); for (i = 0; i < SHARED_ROOTS; i++) { shared_roots[i] = stm_allocate(sizeof(struct node_s)); + ((nodeptr_t)shared_roots[i])->sig = SIGNATURE; ((nodeptr_t)shared_roots[i])->my_size = sizeof(struct node_s); STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); } From noreply at buildbot.pypy.org Mon Mar 3 15:58:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 15:58:29 +0100 (CET) Subject: [pypy-commit] stmgc default: Baaah thanks Remi. Missing locks here. Message-ID: <20140303145829.94AE41C35E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r940:e6540f4d2a4b Date: 2014-03-03 15:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/e6540f4d2a4b/ Log: Baaah thanks Remi. Missing locks here. diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -84,6 +84,8 @@ { /* This function is called during testing, but normal programs don't need to call it. 
*/ + assert(!_has_mutex()); + long i; for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); @@ -124,6 +126,7 @@ void stm_register_thread_local(stm_thread_local_t *tl) { int num; + s_mutex_lock(); if (stm_all_thread_locals == NULL) { stm_all_thread_locals = tl->next = tl->prev = tl; num = 0; @@ -144,16 +147,19 @@ tl->associated_segment_num = num; _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); + s_mutex_unlock(); } void stm_unregister_thread_local(stm_thread_local_t *tl) { + s_mutex_lock(); assert(tl->next != NULL); _done_shadow_stack(tl); if (tl == stm_all_thread_locals) { stm_all_thread_locals = stm_all_thread_locals->next; if (tl == stm_all_thread_locals) { stm_all_thread_locals = NULL; + s_mutex_unlock(); return; } } @@ -161,6 +167,7 @@ tl->next->prev = tl->prev; tl->prev = NULL; tl->next = NULL; + s_mutex_unlock(); } static bool _is_tl_registered(stm_thread_local_t *tl) __attribute__((unused)); From noreply at buildbot.pypy.org Mon Mar 3 16:01:51 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 3 Mar 2014 16:01:51 +0100 (CET) Subject: [pypy-commit] stmgc default: some comments and re-enable RESHARE_PAGES Message-ID: <20140303150151.2BB771D22DE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r941:a31dc548b60b Date: 2014-03-03 16:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/a31dc548b60b/ Log: some comments and re-enable RESHARE_PAGES diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -54,6 +54,7 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); contention_management(other_segment_num); + assert(get_priv_segment(other_segment_num)->pub.nursery_end == NSE_SIGABORT); /* The rest of this code is for the case where we continue to run. We have to signal the other thread to abort, and wait diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -258,10 +258,13 @@ char *ppage0 = get_segment_base(0) + i * 4096; char *ppage1 = get_segment_base(1) + i * 4096; - /* two cases... either the mapping is (0->0, 1->1) or (0->1, - 1->0). Distinguish which case it is by hacking a lot */ + /* two cases for mapping pages to file-pages (fpages): + - (0->0, 1->1) + - (0->1, 1->0) + Distinguish which case it is by hacking a lot */ // 0->0,1->1 or 0->1,1->0 + /* map page 1 to fpage 0: */ d_remap_file_pages(ppage1, 4096, i); // 0->0,1->0 or 0->1,1->0 @@ -273,8 +276,13 @@ char newvalue1 = *ppage1; asm("":::"memory"); *ppage0 = oldvalue0; + /* if we are in 0->0,1->0, old and new are different: + In this case we are done. We keep the largemalloc + data structure and objects of ppage0/fpage0 */ if (oldvalue1 == newvalue1) { // 0->1,1->0 + /* ppage0/fpage1 has the data structure that we want + in ppage1/fpage0, so we copy it */ pagecopy(ppage1, ppage0); // copy from page0 to page1, // i.e. 
from the underlying memory seg1 to seg0 d_remap_file_pages(ppage0, 4096, i); @@ -339,7 +347,7 @@ /* first, if we're not seeing segment 0, we must change the flags in flag_page_private[] from PRIVATE_PAGE to - REMAPPING_PAGE, which will mean "can't re-share" */ + SEGMENT1_PAGE, which will mean "can't re-share" */ if (segment_base != stm_object_pages && RESHARE_PAGES) mark_flag_page_private(obj, segment_base); diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -15,7 +15,7 @@ #define GC_MAJOR_COLLECT 1.82 /* re-share pages after major collections (1 or 0) */ -#define RESHARE_PAGES 0 +#define RESHARE_PAGES 1 diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -156,6 +156,7 @@ they use only one thread */ set_gs_register(get_segment_base(num)); #endif + dprintf(("acquired same segment: %d\n", num)); goto got_num; } /* Look for the next free segment. If there is none, wait for @@ -165,6 +166,7 @@ num = (num + 1) % NB_SEGMENTS; if (sync_ctl.in_use[num] == 0) { /* we're getting 'num', a different number. */ + dprintf(("acquired different segment: %d->%d\n", tl->associated_segment_num, num)); tl->associated_segment_num = num; set_gs_register(get_segment_base(num)); goto got_num; From noreply at buildbot.pypy.org Mon Mar 3 18:32:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Mar 2014 18:32:42 +0100 (CET) Subject: [pypy-commit] stmgc default: More comments. Remove a line that is actually useless. Change the Message-ID: <20140303173242.D6D731C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r942:20b40ac2ddb1 Date: 2014-03-03 18:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/20b40ac2ddb1/ Log: More comments. Remove a line that is actually useless. Change the value we overwrite stm_large_free chunks with. Fix a test. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -53,8 +53,13 @@ uint8_t other_segment_num = prev_owner - 1; assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); + + /* Do generic contention management. If found that we must abort, + calls abort_with_mutex() and never returns. If found that the + other thread must abort, signal it with NSE_SIGABORT. Note that + this can overwrite a NSE_SIGPAUSE, which is fine. */ contention_management(other_segment_num); - assert(get_priv_segment(other_segment_num)->pub.nursery_end == NSE_SIGABORT); + assert(get_segment(other_segment_num)->nursery_end == NSE_SIGABORT); /* The rest of this code is for the case where we continue to run. We have to signal the other thread to abort, and wait @@ -64,12 +69,10 @@ switch (sp) { case SP_RUNNING: - /* The other thread is running now, so if we set - NSE_SIGABORT in 'nursery_end', it will soon enter a - mutex_lock() and thus abort. Note that this line can - overwrite a NSE_SIGPAUSE, which is fine. + /* The other thread is running now, so as NSE_SIGABORT was + set in its 'nursery_end', it will soon enter a + mutex_lock() and thus abort. 
*/ - get_segment(other_segment_num)->nursery_end = NSE_SIGABORT; break; /* The other cases are where the other thread is at a diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -273,8 +273,8 @@ /* unlink the following chunk */ mscan->d.next->prev = mscan->d.prev; mscan->d.prev->next = mscan->d.next; - assert(mscan->prev_size = (size_t)-1); - assert(mscan->size = (size_t)-1); + assert((mscan->prev_size = (size_t)-258, 1)); /* 0xfffffffffffffefe */ + assert((mscan->size = (size_t)-515, 1)); /* 0xfffffffffffffdfd */ /* merge the two chunks */ assert(fsize == fscan->prev_size); diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -168,4 +168,4 @@ if i in keep_me: assert all[i][50] == chr(65 + i) else: - assert all_orig[i][50] == '\xDD' + assert all_orig[i][50] == '\xDE' From noreply at buildbot.pypy.org Mon Mar 3 22:20:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 22:20:44 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: another go at rawffi Message-ID: <20140303212044.BFBE51C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69641:ffc94b4dd771 Date: 2014-03-03 16:19 -0500 http://bitbucket.org/pypy/pypy/changeset/ffc94b4dd771/ Log: another go at rawffi diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -268,7 +268,7 @@ return x >> 16 def BIT_MASK(x, ll_t): - return (((1 << (x - 1)) - 1) << 1) + 1 + return (((widen(rffi.cast(ll_t, 1)) << (x - 1)) - 1) << 1) + 1 BIT_MASK._annspecialcase_ = 'specialize:arg(1)' def push_field(self, num, value): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -490,7 +490,7 @@ assert x.C == 1 x.free() - def test_structure_bitfields(self): + def test_structure_bitfields_varied(self): import _rawffi X = _rawffi.Structure([('A', 'I', 1), ('B', 'I', 2), @@ -504,37 +504,47 @@ assert x.C == -1 x.free() + def test_structure_bitfields_int(self): + import _rawffi Y = _rawffi.Structure([('a', 'i', 1), ('b', 'i', 30), ('c', 'i', 1)]) y = Y() - y.a, y.b, y.c = -1, -7, 0 - assert (y.a, y.b, y.c) == (-1, -7, 0) + y.a, y.b, y.c = -1, -7, 1 + assert (y.a, y.b, y.c) == (-1, -7, -1) y.free() - def test_structure_ulonglong_bitfields(self): + def test_structure_bitfields_uint(self): import _rawffi - X = _rawffi.Structure([('A', 'Q', 1), - ('B', 'Q', 62), - ('C', 'Q', 1)]) - x = X() - x.A, x.B, x.C = 7, 0x1000000000000001, 7 - assert x.A == 1 - assert x.B == 0x1000000000000001 - assert x.C == 1 - x.free() + Y = _rawffi.Structure([('a', 'I', 1), + ('b', 'I', 30), + ('c', 'I', 1)]) + y = Y() + y.a, y.b, y.c = 7, (1 << 29) | 1, 7 + assert (y.a, y.b, y.c) == (1, (1 << 29) | 1, 1) + y.free() - def test_structure_longlong_bitfields(self): + def test_structure_bitfields_longlong(self): import _rawffi Y = _rawffi.Structure([('a', 'q', 1), - ('b', 'q', 61), + ('b', 'q', 62), ('c', 'q', 1)]) y = Y() - y.a, y.b, y.c = 0, -7, 0 - assert (y.a, y.b, y.c) == (0, -7, 0) + y.a, y.b, y.c = -1, -7, 1 + assert (y.a, y.b, y.c) == (-1, -7, -1) y.free() - def test_structure_single_longbit_bitfield(self): + def test_structure_bitfields_ulonglong(self): + import _rawffi + Y = _rawffi.Structure([('a', 'Q', 1), + ('b', 'Q', 62), + ('c', 'Q', 
1)]) + y = Y() + y.a, y.b, y.c = 7, (1 << 61) | 1, 7 + assert (y.a, y.b, y.c) == (1, (1 << 61) | 1, 1) + y.free() + + def test_structure_bitfields_single(self): import _rawffi for s in [('I', 32), ('Q', 64)]: Y = _rawffi.Structure([('a',) + s]) From noreply at buildbot.pypy.org Mon Mar 3 22:35:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 22:35:21 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: need this for 32bit Message-ID: <20140303213521.22FE61C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69642:35362e167655 Date: 2014-03-03 16:34 -0500 http://bitbucket.org/pypy/pypy/changeset/35362e167655/ Log: need this for 32bit diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -15,7 +15,7 @@ from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc -from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint +from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint, r_longlong from rpython.rtyper.lltypesystem import lltype, rffi @@ -309,7 +309,8 @@ if ll_t is lltype.Bool or signedtype(ll_t._type): sign = (value >> (numbits - 1)) & 1 if sign: - value = value - (1 << numbits) + one = r_longlong(1) if ll_t is lltype.SignedLongLong else 1 + value = value - (one << numbits) value = rffi.cast(ll_t, value) break return value From noreply at buildbot.pypy.org Mon Mar 3 23:49:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 23:49:39 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: more rawffi work Message-ID: <20140303224939.205171C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69643:456583a14be5 Date: 2014-03-03 17:47 -0500 http://bitbucket.org/pypy/pypy/changeset/456583a14be5/ Log: more rawffi work diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -15,7 +15,8 @@ from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc -from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint, r_longlong +from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint, \ + r_longlong, r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi @@ -268,7 +269,14 @@ return x >> 16 def BIT_MASK(x, ll_t): - return (((widen(rffi.cast(ll_t, 1)) << (x - 1)) - 1) << 1) + 1 + if ll_t is lltype.SignedLongLong: + one = r_longlong(1) + elif ll_t is lltype.UnsignedLongLong: + one = r_ulonglong(1) + else: + one = 1 + # to avoid left shift by x == sizeof(ll_t) + return (((one << (x - 1)) - 1) << 1) + 1 BIT_MASK._annspecialcase_ = 'specialize:arg(1)' def push_field(self, num, value): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -544,7 +544,19 @@ assert (y.a, y.b, y.c) == (1, (1 << 61) | 1, 1) y.free() - def test_structure_bitfields_single(self): + def test_structure_bitfields_single_signed(self): + import _rawffi + for s in [('i', 32), ('q', 64)]: + Y = _rawffi.Structure([('a',) + s]) + y = Y() + y.a = 10 + assert y.a == 10 + val = (1 << (s[1] - 1)) | 1 + y.a 
= val + assert y.a == val - (1 << s[1]) + y.free() + + def test_structure_bitfields_single_unsigned(self): import _rawffi for s in [('I', 32), ('Q', 64)]: Y = _rawffi.Structure([('a',) + s]) From noreply at buildbot.pypy.org Mon Mar 3 23:59:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 3 Mar 2014 23:59:20 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: add a test_ztranslation for _rawffi Message-ID: <20140303225920.289381C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69644:69bd023558d0 Date: 2014-03-03 17:58 -0500 http://bitbucket.org/pypy/pypy/changeset/69bd023558d0/ Log: add a test_ztranslation for _rawffi diff --git a/pypy/module/_rawffi/test/test_ztranslation.py b/pypy/module/_rawffi/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_rawffi/test/test_ztranslation.py @@ -0,0 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule + + +def test_checkmodule(): + checkmodule('_rawffi') diff --git a/pypy/module/struct/test/test_ztranslation.py b/pypy/module/struct/test/test_ztranslation.py --- a/pypy/module/struct/test/test_ztranslation.py +++ b/pypy/module/struct/test/test_ztranslation.py @@ -3,4 +3,3 @@ def test_checkmodule(): checkmodule('struct') - From noreply at buildbot.pypy.org Tue Mar 4 00:05:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 00:05:38 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix test_file failures on windows Message-ID: <20140303230538.8E3B21C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69645:e6b97653a8c7 Date: 2014-03-03 18:04 -0500 http://bitbucket.org/pypy/pypy/changeset/e6b97653a8c7/ Log: fix test_file failures on windows diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -252,7 +252,7 @@ assert os.strerror(errno.EBADF) in g.getvalue() # the following is a "nice to have" feature that CPython doesn't have if '__pypy__' in sys.builtin_module_names: - assert self.temppath in g.getvalue() + assert repr(self.temppath) in g.getvalue() class AppTestNonblocking(object): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -201,12 +201,12 @@ def test_repr(self): assert repr(self.file).startswith( - " Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69646:54e2cb615ce2 Date: 2014-03-03 18:10 -0500 http://bitbucket.org/pypy/pypy/changeset/54e2cb615ce2/ Log: add a test_ztranslation for pwd diff --git a/pypy/module/pwd/test/test_ztranslation.py b/pypy/module/pwd/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/pwd/test/test_ztranslation.py @@ -0,0 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule + + +def test_checkmodule(): + checkmodule('pwd') From noreply at buildbot.pypy.org Tue Mar 4 00:58:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 00:58:25 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: raise exception on concurrent calls to poll() (cpython issue8865) Message-ID: <20140303235825.453F01C0865@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69647:b659126507d5 Date: 2014-03-03 18:55 -0500 http://bitbucket.org/pypy/pypy/changeset/b659126507d5/ Log: raise exception on concurrent calls to poll() (cpython 
issue8865) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -19,6 +19,7 @@ class Poll(W_Root): def __init__(self): self.fddict = {} + self.running = False @unwrap_spec(events="c_short") def register(self, space, w_fd, events=defaultevents): @@ -41,7 +42,7 @@ raise OperationError(space.w_KeyError, space.wrap(fd)) # XXX should this maybe be w_fd? - @unwrap_spec(w_timeout = WrappedDefault(None)) + @unwrap_spec(w_timeout=WrappedDefault(None)) def poll(self, space, w_timeout): if space.is_w(w_timeout, space.w_None): timeout = -1 @@ -56,6 +57,9 @@ "timeout must be an integer or None") timeout = space.c_int_w(w_timeout) + if self.running: + raise oefmt(space.w_RuntimeError, "concurrent poll() invocation") + self.running = True try: retval = rpoll.poll(self.fddict, timeout) except rpoll.PollError, e: @@ -64,6 +68,8 @@ raise OperationError(w_errortype, space.newtuple([space.wrap(e.errno), space.wrap(message)])) + finally: + self.running = False retval_w = [] for fd, revents in retval: diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -1,5 +1,4 @@ import sys - import py from pypy.interpreter.error import OperationError @@ -234,7 +233,7 @@ class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" spaceconfig = { - "usemodules": ["select", "rctime"] + "usemodules": ["select", "rctime", "thread"] } def setup_class(cls): @@ -258,6 +257,37 @@ s1, s2 = os.pipe() return FileAsSocket(s1), FileAsSocket(s2) + def test_poll_threaded(self): + import os, select, threading, time + if not hasattr(select, 'poll'): + skip("no select.poll() on this platform") + r, w = os.pipe() + rfds = [os.dup(r) for _ in range(10)] + try: + pollster = select.poll() + for fd in rfds: + pollster.register(fd, select.POLLIN) + + t = threading.Thread(target=pollster.poll) + t.start() + try: + time.sleep(0.5); print '', # print to release GIL untranslated + # trigger ufds array reallocation + for fd in rfds: + pollster.unregister(fd) + pollster.register(w, select.POLLOUT) + exc = raises(RuntimeError, pollster.poll) + assert exc.value[0] == 'concurrent poll() invocation' + finally: + # and make the call to poll() from the thread return + os.write(w, b'spam') + t.join() + finally: + os.close(r) + os.close(w) + for fd in rfds: + os.close(fd) + class AppTestSelectWithSockets(_AppTestSelect): """Same tests with connected sockets. 
From noreply at buildbot.pypy.org Tue Mar 4 02:21:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 02:21:09 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: handle eintr in multiprocessing Message-ID: <20140304012109.B75AC1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69648:69c907ade980 Date: 2014-03-03 20:19 -0500 http://bitbucket.org/pypy/pypy/changeset/69c907ade980/ Log: handle eintr in multiprocessing diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -1,4 +1,5 @@ import sys +from errno import EINTR from rpython.rlib import rpoll, rsocket from rpython.rlib.rarithmetic import intmask @@ -306,6 +307,9 @@ try: count = self.WRITE(data) except OSError, e: + if e.errno == EINTR: + space.getexecutioncontext().checksignals() + continue raise wrap_oserror(space, e) size -= count message = rffi.ptradd(message, count) @@ -317,6 +321,9 @@ try: data = self.READ(remaining) except OSError, e: + if e.errno == EINTR: + space.getexecutioncontext().checksignals() + continue raise wrap_oserror(space, e) count = len(data) if count == 0: @@ -340,10 +347,8 @@ def do_poll(self, space, timeout): if not self._check_fd(): - raise OperationError(space.w_IOError, space.wrap( - "handle out of range in select()")) - - r, w, e = rpoll.select([self.fd], [], [], timeout) + raise oefmt(space.w_IOError, "handle out of range in select()") + r, w, e = rpoll.select([self.fd], [], [], timeout, handle_eintr=True) return bool(r) W_FileConnection.typedef = TypeDef( diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -1,4 +1,3 @@ - from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.tool import rffi_platform as platform @@ -40,7 +39,7 @@ 'net/if.h') cond_includes = [('AF_NETLINK', 'linux/netlink.h')] - + libraries = () calling_conv = 'c' HEADER = ''.join(['#include <%s>\n' % filename for filename in includes]) @@ -612,11 +611,11 @@ WSAPROTOCOL_INFO = cConfig.WSAPROTOCOL_INFO FROM_PROTOCOL_INFO = cConfig.FROM_PROTOCOL_INFO - WSADuplicateSocket = external('WSADuplicateSocketA', + WSADuplicateSocket = external('WSADuplicateSocketA', [socketfd_type, rwin32.DWORD, lltype.Ptr(WSAPROTOCOL_INFO)], rffi.INT) - WSASocket = external('WSASocketA', + WSASocket = external('WSASocketA', [rffi.INT, rffi.INT, rffi.INT, lltype.Ptr(WSAPROTOCOL_INFO), rwin32.DWORD, rwin32.DWORD], diff --git a/rpython/rlib/rpoll.py b/rpython/rlib/rpoll.py --- a/rpython/rlib/rpoll.py +++ b/rpython/rlib/rpoll.py @@ -5,6 +5,7 @@ function that directly takes a dictionary as argument. 
""" +from errno import EINTR from rpython.rlib import _rsocket_rffi as _c from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype, rffi @@ -71,9 +72,9 @@ lltype.free(pollfds, flavor='raw') return retval -def select(inl, outl, excl, timeout=-1.0): +def select(inl, outl, excl, timeout=-1.0, handle_eintr=False): nfds = 0 - if inl: + if inl: ll_inl = lltype.malloc(_c.fd_set.TO, flavor='raw') _c.FD_ZERO(ll_inl) for i in inl: @@ -82,7 +83,7 @@ nfds = i else: ll_inl = lltype.nullptr(_c.fd_set.TO) - if outl: + if outl: ll_outl = lltype.malloc(_c.fd_set.TO, flavor='raw') _c.FD_ZERO(ll_outl) for i in outl: @@ -91,7 +92,7 @@ nfds = i else: ll_outl = lltype.nullptr(_c.fd_set.TO) - if excl: + if excl: ll_excl = lltype.malloc(_c.fd_set.TO, flavor='raw') _c.FD_ZERO(ll_excl) for i in excl: @@ -100,15 +101,23 @@ nfds = i else: ll_excl = lltype.nullptr(_c.fd_set.TO) - if timeout != -1.0: + + if timeout < 0: + ll_timeval = lltype.nullptr(_c.timeval) + while True: + res = _c.select(nfds + 1, ll_inl, ll_outl, ll_excl, ll_timeval) + if not handle_eintr or res >= 0 or _c.geterrno() != EINTR: + break + else: + sec = int(timeout) + usec = int((timeout - sec) * 10**6) ll_timeval = rffi.make(_c.timeval) - rffi.setintfield(ll_timeval, 'c_tv_sec', int(timeout)) - rffi.setintfield(ll_timeval, 'c_tv_usec', int((timeout-int(timeout)) - * 1000000)) - else: - ll_timeval = lltype.nullptr(_c.timeval) + rffi.setintfield(ll_timeval, 'c_tv_sec', sec) + rffi.setintfield(ll_timeval, 'c_tv_usec', usec) + res = _c.select(nfds + 1, ll_inl, ll_outl, ll_excl, ll_timeval) + if handle_eintr and res < 0 and _c.geterrno() == EINTR: + res = 0 # interrupted, act as timed out try: - res = _c.select(nfds + 1, ll_inl, ll_outl, ll_excl, ll_timeval) if res == -1: raise SelectError(_c.geterrno()) if res == 0: From noreply at buildbot.pypy.org Tue Mar 4 02:36:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 02:36:53 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: add ztranslation for multiprocessing Message-ID: <20140304013653.A50B61C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69649:12a16da32cc5 Date: 2014-03-03 20:36 -0500 http://bitbucket.org/pypy/pypy/changeset/12a16da32cc5/ Log: add ztranslation for multiprocessing diff --git a/pypy/module/_multiprocessing/test/test_ztranslation.py b/pypy/module/_multiprocessing/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_multiprocessing/test/test_ztranslation.py @@ -0,0 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule + + +def test_checkmodule(): + checkmodule('_multiprocessing') From noreply at buildbot.pypy.org Tue Mar 4 05:28:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 05:28:56 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: typo Message-ID: <20140304042856.C0D691C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69650:b1855ef76188 Date: 2014-03-03 21:05 -0500 http://bitbucket.org/pypy/pypy/changeset/b1855ef76188/ Log: typo diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -181,7 +181,7 @@ data = stream.read(n) except OSError, e: # a special-case only for read() (similar to CPython, which - # also looses partial data with other methods): if we get + # also loses partial data with other methods): if we get # EAGAIN after already some data was received, return it. 
if is_wouldblock_error(e): got = result.build() From noreply at buildbot.pypy.org Tue Mar 4 05:28:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 05:28:57 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix uid conversion in pwd Message-ID: <20140304042857.E8D261C3427@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69651:1c0c58a72c5b Date: 2014-03-03 23:15 -0500 http://bitbucket.org/pypy/pypy/changeset/1c0c58a72c5b/ Log: fix uid conversion in pwd diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from rpython.rlib.rarithmetic import widen +from rpython.rlib.rarithmetic import r_uint eci = ExternalCompilationInfo(includes=['pwd.h']) @@ -45,6 +45,32 @@ c_getpwent = external("getpwent", [], passwd_p) c_endpwent = external("endpwent", [], lltype.Void) + +def uid_converter(space, w_uid): + try: + val = space.int_w(w_uid) + if val == -1: + return rffi.cast(uid_t, -1) + elif val < 0: + raise oefmt(space.w_OverflowError, "user id is less than minimum") + else: + val = r_uint(val) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + try: + val = space.uint_w(w_uid) + except OperationError, e: + if e.match(space, space.w_ValueError): + raise oefmt(space.w_OverflowError, "user id is less than minimum") + elif e.match(space, space.w_OverflowError): + raise oefmt(space.w_OverflowError, "user id is greater than maximum") + raise + uid = rffi.cast(uid_t, val) + if val != uid: + raise oefmt(space.w_OverflowError, "user id is greater than maximum") + return uid + def make_struct_passwd(space, pw): w_passwd_struct = space.getattr(space.getbuiltinmodule('pwd'), space.wrap('struct_passwd')) @@ -69,23 +95,16 @@ """ msg = "getpwuid(): uid not found" try: - val = space.int_w(w_uid) - uid = widen(rffi.cast(uid_t, val)) - if val == -1: - pass - elif val < 0 or uid != val: - raise OperationError(space.w_OverflowError, None) + uid = uid_converter(space, w_uid) except OperationError, e: if e.match(space, space.w_OverflowError): raise oefmt(space.w_KeyError, msg) raise pw = c_getpwuid(uid) if not pw: - raise OperationError(space.w_KeyError, space.wrap( - "%s: %d" % (msg, uid))) + raise oefmt(space.w_KeyError, "%s: %d", msg, uid) return make_struct_passwd(space, pw) - @unwrap_spec(name=str) def getpwnam(space, name): """ @@ -99,7 +118,6 @@ raise oefmt(space.w_KeyError, "getpwnam(): name not found: %s", name) return make_struct_passwd(space, pw) - def getpwall(space): users_w = [] c_setpwent() From noreply at buildbot.pypy.org Tue Mar 4 05:29:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Mar 2014 05:29:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140304042931.6D7781C3427@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69652:cd4e6aa5d739 Date: 2014-02-21 15:54 -0800 http://bitbucket.org/pypy/pypy/changeset/cd4e6aa5d739/ Log: merge default diff too long, truncating to 2000 out of 4512 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -244,14 +244,16 @@ IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), - 
BoolOption("optimized_int_add", - "special case the addition of two integers in BINARY_ADD", + BoolOption("intshortcut", + "special case addition and subtraction of two integers in BINARY_ADD/" + "/BINARY_SUBTRACT and their inplace counterparts", default=False), BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), BoolOption("builtinshortcut", - "a shortcut for operations between built-in types", + "a shortcut for operations between built-in types. XXX: " + "deprecated, not really a shortcut any more.", default=False), BoolOption("getattributeshortcut", "track types that override __getattribute__", @@ -294,7 +296,7 @@ if level in ['2', '3', 'jit']: config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(builtinshortcut=True) + config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) diff --git a/pypy/doc/config/objspace.std.intshortcut.txt b/pypy/doc/config/objspace.std.intshortcut.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.intshortcut.txt @@ -0,0 +1,2 @@ +Optimize the addition and subtraction of two integers. Enabling this +option gives small speedups. diff --git a/pypy/doc/config/objspace.std.optimized_int_add.txt b/pypy/doc/config/objspace.std.optimized_int_add.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_int_add.txt +++ /dev/null @@ -1,2 +0,0 @@ -Optimize the addition of two integers a bit. Enabling this option gives small -speedups. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -82,3 +82,6 @@ .. branch: bounds-int-add-or Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the operands are positive to kill some guards + +.. 
branch: remove-intlong-smm +kills int/long/smalllong/bool multimethods diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -189,7 +189,7 @@ def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) - return w_obj.boolval + return bool(w_obj.intval) def is_w(self, w_obj, w_what): return w_obj is w_what @@ -257,7 +257,7 @@ class BoolObject(W_Root): tp = FakeSpace.w_bool def __init__(self, boolval): - self.boolval = boolval + self.intval = boolval class IntObject(W_Root): tp = FakeSpace.w_int diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -6,6 +6,7 @@ from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.longtype import long_typedef from pypy.objspace.std.unicodeobject import W_UnicodeObject +from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi @@ -19,8 +20,8 @@ from pypy.module.micronumpy.constants import * -MIXIN_32 = (long_typedef,) if LONG_BIT == 32 else () -MIXIN_64 = (long_typedef,) if LONG_BIT == 64 else () +MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () +MIXIN_64 = (W_IntObject.typedef,) if LONG_BIT == 64 else () #long_double_size = rffi.sizeof_c_type('long double', ignore_errors=True) #import os @@ -710,7 +711,7 @@ ) W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, - (W_SignedIntegerBox.typedef, long_typedef), + (W_SignedIntegerBox.typedef, W_IntObject.typedef), __module__ = "numpy", __new__ = interp2app(W_LongBox.descr__new__.im_func), __index__ = interp2app(W_LongBox.descr_index), diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -3,8 +3,8 @@ pypypolicy = policy.PyPyJitPolicy() def test_id_any(): - from pypy.objspace.std.intobject import add__Int_Int - assert pypypolicy.look_inside_function(add__Int_Int) + from pypy.objspace.std.intobject import W_IntObject + assert pypypolicy.look_inside_function(W_IntObject.descr_add) def test_bigint(): from rpython.rlib.rbigint import rbigint diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -1,79 +1,110 @@ -from rpython.rlib.rbigint import rbigint +"""The builtin bool implementation""" + +import operator + from rpython.rlib.rarithmetic import r_uint -from pypy.interpreter.error import OperationError -from pypy.objspace.std import newformat -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.intobject import W_IntObject +from rpython.tool.sourcetools import func_renamer, func_with_new_name +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.objspace.std.intobject import W_AbstractIntObject, W_IntObject +from pypy.objspace.std.stdtypedef import StdTypeDef -class W_BoolObject(W_Object): - from pypy.objspace.std.booltype import bool_typedef as typedef - _immutable_fields_ = ['boolval'] + +class W_BoolObject(W_IntObject): def __init__(self, boolval): - self.boolval = not not boolval + self.intval = not not boolval def __nonzero__(self): 
raise Exception("you cannot do that, you must use space.is_true()") def __repr__(self): - """ representation for debugging purposes """ - return "%s(%s)" % (self.__class__.__name__, self.boolval) + """representation for debugging purposes""" + return "%s(%s)" % (self.__class__.__name__, bool(self.intval)) + + def is_w(self, space, w_other): + return self is w_other + + def immutable_unique_id(self, space): + return None def unwrap(self, space): - return self.boolval - - def int_w(self, space): - return int(self.boolval) + return bool(self.intval) def uint_w(self, space): - intval = int(self.boolval) - return r_uint(intval) - - def bigint_w(self, space): - return rbigint.fromint(int(self.boolval)) - - def float_w(self, space): - return float(self.boolval) + return r_uint(self.intval) def int(self, space): - return space.newint(int(self.boolval)) + return space.newint(self.intval) -registerimplementation(W_BoolObject) + @staticmethod + @unwrap_spec(w_obj=WrappedDefault(False)) + def descr_new(space, w_booltype, w_obj): + """T.__new__(S, ...) -> a new object with type S, a subtype of T""" + space.w_bool.check_user_subclass(w_booltype) + return space.newbool(space.is_true(w_obj)) + + def descr_repr(self, space): + return space.wrap('True' if self.intval else 'False') + descr_str = func_with_new_name(descr_repr, 'descr_str') + + def descr_nonzero(self, space): + return self + + def _make_bitwise_binop(opname): + descr_name = 'descr_' + opname + int_op = getattr(W_IntObject, descr_name) + op = getattr(operator, + opname + '_' if opname in ('and', 'or') else opname) + + @func_renamer(descr_name) + def descr_binop(self, space, w_other): + if not isinstance(w_other, W_BoolObject): + return int_op(self, space, w_other) + a = bool(self.intval) + b = bool(w_other.intval) + return space.newbool(op(a, b)) + + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + return descr_binop(self, space, w_other) + + return descr_binop, descr_rbinop + + descr_and, descr_rand = _make_bitwise_binop('and') + descr_or, descr_ror = _make_bitwise_binop('or') + descr_xor, descr_rxor = _make_bitwise_binop('xor') + W_BoolObject.w_False = W_BoolObject(False) -W_BoolObject.w_True = W_BoolObject(True) +W_BoolObject.w_True = W_BoolObject(True) -# bool-to-int delegation requires translating the .boolvar attribute -# to an .intval one -def delegate_Bool2IntObject(space, w_bool): - return W_IntObject(int(w_bool.boolval)) +W_BoolObject.typedef = StdTypeDef("bool", W_IntObject.typedef, + __doc__ = """bool(x) -> bool -def nonzero__Bool(space, w_bool): - return w_bool +Returns True when the argument x is true, False otherwise. +The builtins True and False are the only two instances of the class bool. 
+The class bool is a subclass of the class int, and cannot be subclassed.""", + __new__ = interp2app(W_BoolObject.descr_new), + __repr__ = interp2app(W_BoolObject.descr_repr, + doc=W_AbstractIntObject.descr_repr.__doc__), + __str__ = interp2app(W_BoolObject.descr_str, + doc=W_AbstractIntObject.descr_str.__doc__), + __nonzero__ = interp2app(W_BoolObject.descr_nonzero, + doc=W_AbstractIntObject.descr_nonzero.__doc__), -def repr__Bool(space, w_bool): - if w_bool.boolval: - return space.wrap('True') - else: - return space.wrap('False') - -def and__Bool_Bool(space, w_bool1, w_bool2): - return space.newbool(w_bool1.boolval & w_bool2.boolval) - -def or__Bool_Bool(space, w_bool1, w_bool2): - return space.newbool(w_bool1.boolval | w_bool2.boolval) - -def xor__Bool_Bool(space, w_bool1, w_bool2): - return space.newbool(w_bool1.boolval ^ w_bool2.boolval) - -str__Bool = repr__Bool - -def format__Bool_ANY(space, w_bool, w_format_spec): - return newformat.run_formatter( - space, w_format_spec, "format_int_or_long", w_bool, - newformat.INT_KIND) - -register_all(vars()) + __and__ = interp2app(W_BoolObject.descr_and, + doc=W_AbstractIntObject.descr_and.__doc__), + __rand__ = interp2app(W_BoolObject.descr_rand, + doc=W_AbstractIntObject.descr_rand.__doc__), + __or__ = interp2app(W_BoolObject.descr_or, + doc=W_AbstractIntObject.descr_or.__doc__), + __ror__ = interp2app(W_BoolObject.descr_ror, + doc=W_AbstractIntObject.descr_ror.__doc__), + __xor__ = interp2app(W_BoolObject.descr_xor, + doc=W_AbstractIntObject.descr_xor.__doc__), + __rxor__ = interp2app(W_BoolObject.descr_rxor, + doc=W_AbstractIntObject.descr_rxor.__doc__), + ) +W_BoolObject.typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/booltype.py b/pypy/objspace/std/booltype.py deleted file mode 100644 --- a/pypy/objspace/std/booltype.py +++ /dev/null @@ -1,23 +0,0 @@ -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.objspace.std.stdtypedef import StdTypeDef -from pypy.objspace.std.longtype import long_typedef - - at unwrap_spec(w_obj = WrappedDefault(False)) -def descr__new__(space, w_booltype, w_obj): - space.w_bool.check_user_subclass(w_booltype) - if space.is_true(w_obj): - return space.w_True - else: - return space.w_False - -# ____________________________________________________________ - -bool_typedef = StdTypeDef("bool", long_typedef, - __doc__ = '''bool(x) -> bool - -Returns True when the argument x is true, False otherwise. -The builtins True and False are the only two instances of the class bool. 
-The class bool is a subclass of the class int, and cannot be subclassed.''', - __new__ = interp2app(descr__new__), - ) -bool_typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/builtinshortcut.py b/pypy/objspace/std/builtinshortcut.py --- a/pypy/objspace/std/builtinshortcut.py +++ b/pypy/objspace/std/builtinshortcut.py @@ -117,7 +117,7 @@ # always directly return a Bool; however, the __len__ method # of built-in objects typically returns an unwrappable integer if isinstance(w_res, W_BoolObject): - return w_res.boolval + return bool(w_res.intval) try: return space.int_w(w_res) != 0 except OperationError: diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -1,6 +1,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat +from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all from pypy.objspace.std.floatobject import W_FloatObject, _hash_float @@ -113,7 +114,7 @@ def delegate_Bool2Complex(space, w_bool): - return W_ComplexObject(w_bool.boolval, 0.0) + return W_ComplexObject(w_bool.intval, 0.0) def delegate_Int2Complex(space, w_int): return W_ComplexObject(w_int.intval, 0.0) @@ -189,17 +190,21 @@ if w_complex1.imagval: return space.w_False return space.eq(space.newfloat(w_complex1.realval), w_long2) +eq__Complex_Int = eq__Complex_Long def eq__Long_Complex(space, w_long1, w_complex2): return eq__Complex_Long(space, w_complex2, w_long1) +eq__Int_Complex = eq__Long_Complex def ne__Complex_Long(space, w_complex1, w_long2): if w_complex1.imagval: return space.w_True return space.ne(space.newfloat(w_complex1.realval), w_long2) +ne__Complex_Int = ne__Complex_Long def ne__Long_Complex(space, w_long1, w_complex2): return ne__Complex_Long(space, w_complex2, w_long1) +ne__Int_Complex = ne__Long_Complex def lt__Complex_Complex(space, w_complex1, w_complex2): from pypy.objspace.std.model import FailedToImplement diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -59,7 +59,7 @@ # bool-to-float delegation def delegate_Bool2Float(space, w_bool): - return W_FloatObject(float(w_bool.boolval)) + return W_FloatObject(float(w_bool.intval)) # int-to-float delegation def delegate_Int2Float(space, w_intobj): @@ -538,20 +538,3 @@ from pypy.objspace.std import floattype register_all(vars(), floattype) - -# pow delegation for negative 2nd arg -def pow_neg__Long_Long_None(space, w_int1, w_int2, thirdarg): - w_float1 = delegate_Long2Float(space, w_int1) - w_float2 = delegate_Long2Float(space, w_int2) - return pow__Float_Float_ANY(space, w_float1, w_float2, thirdarg) - -model.MM.pow.register(pow_neg__Long_Long_None, W_LongObject, W_LongObject, - W_NoneObject, order=1) - -def pow_neg__Int_Int_None(space, w_int1, w_int2, thirdarg): - w_float1 = delegate_Int2Float(space, w_int1) - w_float2 = delegate_Int2Float(space, w_int2) - return pow__Float_Float_ANY(space, w_float1, w_float2, thirdarg) - -model.MM.pow.register(pow_neg__Int_Int_None, W_IntObject, W_IntObject, - W_NoneObject, order=2) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -50,7 +50,7 @@ try: return 
rfloat.string_to_float(string) except ParseStringError as e: - from pypy.objspace.std.inttype import wrap_parsestringerror + from pypy.objspace.std.intobject import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py --- a/pypy/objspace/std/frame.py +++ b/pypy/objspace/std/frame.py @@ -2,61 +2,87 @@ import operator -from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter import pyopcode +from rpython.rlib.rarithmetic import ovfcheck +from rpython.tool.sourcetools import func_renamer + from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.error import OperationError -from pypy.objspace.std import intobject -from pypy.objspace.std.multimethod import FailedToImplement +from pypy.interpreter.error import oefmt +from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.listobject import W_ListObject class BaseFrame(PyFrame): """These opcodes are always overridden.""" - def LIST_APPEND(f, oparg, next_instr): - w = f.popvalue() - v = f.peekvalue(oparg - 1) + def LIST_APPEND(self, oparg, next_instr): + w = self.popvalue() + v = self.peekvalue(oparg - 1) if type(v) is W_ListObject: v.append(w) else: raise AssertionError -def int_BINARY_ADD(f, oparg, next_instr): - w_2 = f.popvalue() - w_1 = f.popvalue() - if (type(w_1) is intobject.W_IntObject and - type(w_2) is intobject.W_IntObject): - try: - w_result = intobject.add__Int_Int(f.space, w_1, w_2) - except FailedToImplement: - w_result = f.space.add(w_1, w_2) +def _intshortcut(spaceopname): + if spaceopname.startswith('inplace_'): + opname = spaceopname[len('inplace_'):] + funcprefix = 'int_' else: - w_result = f.space.add(w_1, w_2) - f.pushvalue(w_result) + opname = spaceopname + funcprefix = 'int_BINARY_' + op = getattr(operator, opname) + int_op = getattr(W_IntObject, 'descr_' + opname) + @func_renamer(funcprefix + spaceopname.upper()) + def opimpl(self, oparg, next_instr): + space = self.space + space_op = getattr(space, spaceopname) -def list_BINARY_SUBSCR(f, oparg, next_instr): - w_2 = f.popvalue() - w_1 = f.popvalue() - if type(w_1) is W_ListObject and type(w_2) is intobject.W_IntObject: + w_2 = self.popvalue() + w_1 = self.popvalue() + if type(w_1) is W_IntObject and type(w_2) is W_IntObject: + try: + z = ovfcheck(op(w_1.intval, w_2.intval)) + except OverflowError: + w_result = int_op(w_1, space, w_2) + else: + w_result = space.newint(z) + else: + w_result = space_op(w_1, w_2) + self.pushvalue(w_result) + + return opimpl + + +int_BINARY_ADD = _intshortcut('add') +int_INPLACE_ADD = _intshortcut('inplace_add') +int_BINARY_SUBTRACT = _intshortcut('sub') +int_INPLACE_SUBTRACT = _intshortcut('inplace_sub') + + +def list_BINARY_SUBSCR(self, oparg, next_instr): + space = self.space + w_2 = self.popvalue() + w_1 = self.popvalue() + if type(w_1) is W_ListObject and type(w_2) is W_IntObject: try: w_result = w_1.getitem(w_2.intval) except IndexError: - raise OperationError(f.space.w_IndexError, - f.space.wrap("list index out of range")) + raise oefmt(space.w_IndexError, "list index out of range") else: - w_result = f.space.getitem(w_1, w_2) - f.pushvalue(w_result) + w_result = space.getitem(w_1, w_2) + self.pushvalue(w_result) def build_frame(space): """Consider the objspace config and return a patched frame object.""" class StdObjSpaceFrame(BaseFrame): pass - if space.config.objspace.std.optimized_int_add: + if space.config.objspace.std.intshortcut: StdObjSpaceFrame.BINARY_ADD = int_BINARY_ADD + 
StdObjSpaceFrame.INPLACE_ADD = int_INPLACE_ADD + StdObjSpaceFrame.BINARY_SUBTRACT = int_BINARY_SUBTRACT + StdObjSpaceFrame.INPLACE_SUBTRACT = int_INPLACE_SUBTRACT if space.config.objspace.std.optimized_list_getitem: StdObjSpaceFrame.BINARY_SUBSCR = list_BINARY_SUBSCR from pypy.objspace.std.callmethod import LOOKUP_METHOD, CALL_METHOD diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -5,25 +5,302 @@ for overflows, something CPython does not do anymore. """ +import operator +import sys + from rpython.rlib import jit -from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int, ovfcheck, r_uint +from rpython.rlib.objectmodel import instantiate, import_from_mixin, specialize +from rpython.rlib.rarithmetic import ( + LONG_BIT, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint +from rpython.rlib.rstring import ( + InvalidBaseError, ParseStringError, ParseStringOverflowError) +from rpython.tool.sourcetools import func_renamer, func_with_new_name -from pypy.interpreter.error import OperationError +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.buffer import Buffer +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.objspace.std import newformat -from pypy.objspace.std.inttype import W_AbstractIntObject -from pypy.objspace.std.model import W_Object, registerimplementation -from pypy.objspace.std.multimethod import FailedToImplementArgs -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.model import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) +from pypy.objspace.std.stdtypedef import StdTypeDef + + +SENTINEL = object() + + +class W_AbstractIntObject(W_Root): + + __slots__ = () + + def is_w(self, space, w_other): + from pypy.objspace.std.boolobject import W_BoolObject + if (not isinstance(w_other, W_AbstractIntObject) or + isinstance(w_other, W_BoolObject)): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + return space.int_w(self) == space.int_w(w_other) + + def immutable_unique_id(self, space): + if self.user_overridden_class: + return None + b = space.bigint_w(self) + b = b.lshift(3).or_(rbigint.fromint(IDTAG_INT)) + return space.newlong_from_rbigint(b) + + def int(self, space): + """x.__int__() <==> int(x)""" + raise NotImplementedError + + def descr_format(self, space, w_format_spec): + raise NotImplementedError + + def descr_pow(self, space, w_exponent, w_modulus=None): + """x.__pow__(y[, z]) <==> pow(x, y[, z])""" + raise NotImplementedError + descr_rpow = func_with_new_name(descr_pow, 'descr_rpow') + descr_rpow.__doc__ = "y.__rpow__(x[, z]) <==> pow(x, y[, z])" + + def _abstract_unaryop(opname, doc=SENTINEL): + if doc is SENTINEL: + doc = 'x.__%s__() <==> %s(x)' % (opname, opname) + @func_renamer('descr_' + opname) + def descr_unaryop(self, space): + raise NotImplementedError + descr_unaryop.__doc__ = doc + return descr_unaryop + + descr_repr = _abstract_unaryop('repr') + descr_str = _abstract_unaryop('str') + + descr_coerce = _abstract_unaryop('coerce') + descr_conjugate = _abstract_unaryop( + 'conjugate', "Returns self, the complex conjugate of any int.") + descr_bit_length = _abstract_unaryop('bit_length', """\ + int.bit_length() -> 
int + + Number of bits necessary to represent self in binary. + >>> bin(37) + '0b100101' + >>> (37).bit_length() + 6""") + descr_hash = _abstract_unaryop('hash') + descr_oct = _abstract_unaryop('oct') + descr_hex = _abstract_unaryop('hex') + descr_getnewargs = _abstract_unaryop('getnewargs', None) + + descr_long = _abstract_unaryop('long') + descr_index = _abstract_unaryop( + 'index', "x[y:z] <==> x[y.__index__():z.__index__()]") + descr_trunc = _abstract_unaryop('trunc', + "Truncating an Integral returns itself.") + descr_float = _abstract_unaryop('float') + + descr_pos = _abstract_unaryop('pos', "x.__pos__() <==> +x") + descr_neg = _abstract_unaryop('neg', "x.__neg__() <==> -x") + descr_abs = _abstract_unaryop('abs') + descr_nonzero = _abstract_unaryop('nonzero', "x.__nonzero__() <==> x != 0") + descr_invert = _abstract_unaryop('invert', "x.__invert__() <==> ~x") + + def _abstract_cmpop(opname): + @func_renamer('descr_' + opname) + def descr_cmp(self, space, w_other): + raise NotImplementedError + descr_cmp.__doc__ = 'x.__%s__(y) <==> x%sy' % (opname, CMP_OPS[opname]) + return descr_cmp + + descr_lt = _abstract_cmpop('lt') + descr_le = _abstract_cmpop('le') + descr_eq = _abstract_cmpop('eq') + descr_ne = _abstract_cmpop('ne') + descr_gt = _abstract_cmpop('gt') + descr_ge = _abstract_cmpop('ge') + + def _abstract_binop(opname): + oper = BINARY_OPS.get(opname) + if oper == '%': + oper = '%%' + oper = '%s(%%s, %%s)' % opname if not oper else '%%s%s%%s' % oper + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + raise NotImplementedError + descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname, + oper % ('x', 'y')) + descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) + descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname, + oper % ('y', 'x')) + return descr_binop, descr_rbinop + + descr_add, descr_radd = _abstract_binop('add') + descr_sub, descr_rsub = _abstract_binop('sub') + descr_mul, descr_rmul = _abstract_binop('mul') + + descr_and, descr_rand = _abstract_binop('and') + descr_or, descr_ror = _abstract_binop('or') + descr_xor, descr_rxor = _abstract_binop('xor') + + descr_lshift, descr_rlshift = _abstract_binop('lshift') + descr_rshift, descr_rrshift = _abstract_binop('rshift') + + descr_floordiv, descr_rfloordiv = _abstract_binop('floordiv') + descr_div, descr_rdiv = _abstract_binop('div') + descr_truediv, descr_rtruediv = _abstract_binop('truediv') + descr_mod, descr_rmod = _abstract_binop('mod') + descr_divmod, descr_rdivmod = _abstract_binop('divmod') + + +def _floordiv(space, x, y): + try: + z = ovfcheck(x // y) + except ZeroDivisionError: + raise oefmt(space.w_ZeroDivisionError, "integer division by zero") + return wrapint(space, z) +_div = func_with_new_name(_floordiv, '_div') + + +def _truediv(space, x, y): + a = float(x) + b = float(y) + if b == 0.0: + raise oefmt(space.w_ZeroDivisionError, "division by zero") + return space.wrap(a / b) + + +def _mod(space, x, y): + try: + z = ovfcheck(x % y) + except ZeroDivisionError: + raise oefmt(space.w_ZeroDivisionError, "integer modulo by zero") + return wrapint(space, z) + + +def _divmod(space, x, y): + try: + z = ovfcheck(x // y) + except ZeroDivisionError: + raise oefmt(space.w_ZeroDivisionError, "integer divmod by zero") + # no overflow possible + m = x % y + w = space.wrap + return space.newtuple([w(z), w(m)]) + + +def _divmod_ovf2small(space, x, y): + from pypy.objspace.std.smalllongobject import W_SmallLongObject + a = r_longlong(x) + b = r_longlong(y) + return 
space.newtuple([W_SmallLongObject(a // b), + W_SmallLongObject(a % b)]) + + +def _lshift(space, a, b): + if r_uint(b) < LONG_BIT: # 0 <= b < LONG_BIT + c = ovfcheck(a << b) + return wrapint(space, c) + if b < 0: + raise oefmt(space.w_ValueError, "negative shift count") + # b >= LONG_BIT + if a == 0: + return wrapint(space, a) + raise OverflowError + + +def _lshift_ovf2small(space, a, b): + from pypy.objspace.std.smalllongobject import W_SmallLongObject + w_a = W_SmallLongObject.fromint(a) + w_b = W_SmallLongObject.fromint(b) + return w_a.descr_lshift(space, w_b) + + +def _rshift(space, a, b): + if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) + if b < 0: + raise oefmt(space.w_ValueError, "negative shift count") + # b >= LONG_BIT + if a == 0: + return wrapint(space, a) + a = -1 if a < 0 else 0 + else: + a = a >> b + return wrapint(space, a) + + + at jit.look_inside_iff(lambda space, iv, iw, iz: + jit.isconstant(iw) and jit.isconstant(iz)) +def _pow(space, iv, iw, iz): + """Helper for pow""" + if iw < 0: + if iz != 0: + raise oefmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when 3rd " + "argument specified") + # bounce it, since it always returns float + raise ValueError + temp = iv + ix = 1 + while iw > 0: + if iw & 1: + ix = ovfcheck(ix * temp) + iw >>= 1 # Shift exponent down by 1 bit + if iw == 0: + break + temp = ovfcheck(temp * temp) # Square the value of temp + if iz: + # If we did a multiplication, perform a modulo + ix %= iz + temp %= iz + if iz: + ix %= iz + return ix + + +def _pow_ovf2long(space, iv, iw, w_modulus): + if space.is_none(w_modulus) and _recover_with_smalllong(space): + from pypy.objspace.std.smalllongobject import _pow as _pow_small + try: + # XXX: shouldn't have to pass r_longlong(0) here (see + # 4fa4c6b93a84) + return _pow_small(space, r_longlong(iv), iw, r_longlong(0)) + except (OverflowError, ValueError): + pass + from pypy.objspace.std.longobject import W_LongObject + w_iv = W_LongObject.fromint(space, iv) + w_iw = W_LongObject.fromint(space, iw) + return w_iv.descr_pow(space, w_iw, w_modulus) + + +def _make_ovf2long(opname, ovf2small=None): + op = getattr(operator, opname, None) + assert op or ovf2small + + def ovf2long(space, x, y): + """Handle overflowing to smalllong or long""" + if _recover_with_smalllong(space): + if ovf2small: + return ovf2small(space, x, y) + # Assume a generic operation without an explicit ovf2small + # handler + from pypy.objspace.std.smalllongobject import W_SmallLongObject + a = r_longlong(x) + b = r_longlong(y) + return W_SmallLongObject(op(a, b)) + + from pypy.objspace.std.longobject import W_LongObject + w_x = W_LongObject.fromint(space, x) + w_y = W_LongObject.fromint(space, y) + return getattr(w_x, 'descr_' + opname)(space, w_y) + + return ovf2long class W_IntObject(W_AbstractIntObject): + __slots__ = 'intval' _immutable_fields_ = ['intval'] -# from pypy.objspace.std.inttype import int_typedef as typedef - def __init__(self, intval): assert is_valid_int(intval) self.intval = intval @@ -32,18 +309,16 @@ """representation for debugging purposes""" return "%s(%d)" % (self.__class__.__name__, self.intval) - def unwrap(self, space): + def int_w(self, space): return int(self.intval) - int_w = unwrap + unwrap = int_w def uint_w(self, space): intval = self.intval if intval < 0: - raise OperationError( - space.w_ValueError, - space.wrap("cannot convert negative integer to unsigned")) - else: - return r_uint(intval) + raise oefmt(space.w_ValueError, + "cannot convert negative integer to unsigned") + return 
r_uint(intval) def bigint_w(self, space): return rbigint.fromint(self.intval) @@ -52,272 +327,545 @@ return float(self.intval) def int(self, space): - if (type(self) is not W_IntObject and - space.is_overloaded(self, space.w_int, '__int__')): - return W_Object.int(self, space) - if space.is_w(space.type(self), space.w_int): + if type(self) is W_IntObject: return self + if not space.is_overloaded(self, space.w_int, '__int__'): + return space.newint(self.intval) + return W_Root.int(self, space) + + @staticmethod + @unwrap_spec(w_x=WrappedDefault(0)) + def descr_new(space, w_inttype, w_x, w_base=None): + """T.__new__(S, ...) -> a new object with type S, a subtype of T""" + return _new_int(space, w_inttype, w_x, w_base) + + def descr_hash(self, space): + # unlike CPython, we don't special-case the value -1 in most of + # our hash functions, so there is not much sense special-casing + # it here either. Make sure this is consistent with the hash of + # floats and longs. + return self.int(space) + + def _int(self, space): + return self.int(space) + + descr_pos = func_with_new_name(_int, 'descr_pos') + descr_index = func_with_new_name(_int, 'descr_index') + descr_trunc = func_with_new_name(_int, 'descr_trunc') + descr_conjugate = func_with_new_name(_int, 'descr_conjugate') + + descr_get_numerator = func_with_new_name(_int, 'descr_get_numerator') + descr_get_real = func_with_new_name(_int, 'descr_get_real') + + def descr_get_denominator(self, space): + return wrapint(space, 1) + + def descr_get_imag(self, space): + return wrapint(space, 0) + + def descr_coerce(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + return space.newtuple([self, w_other]) + + def descr_long(self, space): + # XXX: should try smalllong + from pypy.objspace.std.longobject import W_LongObject + return W_LongObject.fromint(space, self.intval) + + def descr_nonzero(self, space): + return space.newbool(self.intval != 0) + + def descr_invert(self, space): + return wrapint(space, ~self.intval) + + def descr_neg(self, space): a = self.intval - return space.newint(a) + try: + b = ovfcheck(-a) + except OverflowError: + if _recover_with_smalllong(space): + from pypy.objspace.std.smalllongobject import W_SmallLongObject + x = r_longlong(a) + return W_SmallLongObject(-x) + return self.descr_long(space).descr_neg(space) + return wrapint(space, b) -#registerimplementation(W_IntObject) + def descr_abs(self, space): + pos = self.intval >= 0 + return self.int(space) if pos else self.descr_neg(space) -def repr__Int(space, w_int1): - a = w_int1.intval - res = str(a) - return space.wrap(res) + def descr_float(self, space): + a = self.intval + x = float(a) + return space.newfloat(x) -str__Int = repr__Int + def descr_oct(self, space): + return space.wrap(oct(self.intval)) -def format__Int_ANY(space, w_int, w_format_spec): - return newformat.run_formatter(space, w_format_spec, "format_int_or_long", - w_int, newformat.INT_KIND) + def descr_hex(self, space): + return space.wrap(hex(self.intval)) -def declare_new_int_comparison(opname): - import operator - from rpython.tool.sourcetools import func_with_new_name - op = getattr(operator, opname) - def f(space, w_int1, w_int2): - i = w_int1.intval - j = w_int2.intval - return space.newbool(op(i, j)) - name = "%s__Int_Int" % (opname,) - return func_with_new_name(f, name), name + def descr_getnewargs(self, space): + return space.newtuple([wrapint(space, self.intval)]) -for op in ['lt', 'le', 'eq', 'ne', 'gt', 'ge']: - func, name = 
declare_new_int_comparison(op) - globals()[name] = func + def descr_bit_length(self, space): + val = self.intval + if val < 0: + val = -val + bits = 0 + while val: + bits += 1 + val >>= 1 + return space.wrap(bits) -def hash__Int(space, w_int1): - # unlike CPython, we don't special-case the value -1 in most of our - # hash functions, so there is not much sense special-casing it here either. - # Make sure this is consistent with the hash of floats and longs. - return w_int1.int(space) + def descr_repr(self, space): + res = str(self.intval) + return space.wrap(res) + descr_str = func_with_new_name(descr_repr, 'descr_str') + def descr_format(self, space, w_format_spec): + return newformat.run_formatter(space, w_format_spec, + "format_int_or_long", self, + newformat.INT_KIND) -def add__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_pow(self, space, w_exponent, w_modulus=None): + if not isinstance(w_exponent, W_IntObject): + return space.w_NotImplemented + + x = self.intval + y = w_exponent.intval + + if space.is_none(w_modulus): + z = 0 + elif isinstance(w_modulus, W_IntObject): + z = w_modulus.intval + if z == 0: + raise oefmt(space.w_ValueError, + "pow() 3rd argument cannot be 0") + else: + # can't return NotImplemented (space.pow doesn't do full + # ternary, i.e. w_modulus.__zpow__(self, w_exponent)), so + # handle it ourselves + return _pow_ovf2long(space, x, y, w_modulus) + + try: + result = _pow(space, x, y, z) + except (OverflowError, ValueError): + return _pow_ovf2long(space, x, y, w_modulus) + return space.wrap(result) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_rpow(self, space, w_base, w_modulus=None): + if not isinstance(w_base, W_IntObject): + return space.w_NotImplemented + return w_base.descr_pow(space, self, w_modulus) + + def _make_descr_cmp(opname): + op = getattr(operator, opname) + @func_renamer('descr_' + opname) + def descr_cmp(self, space, w_other): + if not isinstance(w_other, W_IntObject): + return space.w_NotImplemented + i = self.intval + j = w_other.intval + return space.newbool(op(i, j)) + return descr_cmp + + descr_lt = _make_descr_cmp('lt') + descr_le = _make_descr_cmp('le') + descr_eq = _make_descr_cmp('eq') + descr_ne = _make_descr_cmp('ne') + descr_gt = _make_descr_cmp('gt') + descr_ge = _make_descr_cmp('ge') + + def _make_generic_descr_binop(opname, ovf=True): + op = getattr(operator, + opname + '_' if opname in ('and', 'or') else opname) + descr_rname = 'descr_r' + opname + if ovf: + ovf2long = _make_ovf2long(opname) + + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + if not isinstance(w_other, W_IntObject): + return space.w_NotImplemented + + x = self.intval + y = w_other.intval + if ovf: + try: + z = ovfcheck(op(x, y)) + except OverflowError: + return ovf2long(space, x, y) + else: + z = op(x, y) + return wrapint(space, z) + + if opname in COMMUTATIVE_OPS: + @func_renamer(descr_rname) + def descr_rbinop(self, space, w_other): + return descr_binop(self, space, w_other) + return descr_binop, descr_rbinop + + @func_renamer(descr_rname) + def descr_rbinop(self, space, w_other): + if not isinstance(w_other, W_IntObject): + return space.w_NotImplemented + + x = self.intval + y = w_other.intval + if ovf: + try: + z = ovfcheck(op(y, x)) + except OverflowError: + return ovf2long(space, y, x) + else: + z = op(y, x) + return wrapint(space, z) + + return descr_binop, descr_rbinop + + descr_add, descr_radd = _make_generic_descr_binop('add') + 
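The binop helpers being added here all share one overflow discipline: perform the operation under ovfcheck(), and only when the result no longer fits a machine word hand the operands to the long (or smalllong) implementation built by _make_ovf2long. The new int_BINARY_ADD/INPLACE_ADD shortcuts in frame.py rely on the same discipline. A plain-Python sketch of the idea, where _ovfcheck and long_add are illustrative stand-ins for RPython's ovfcheck() and the W_LongObject fallback (plain Python ints are already arbitrary precision, so the fallback is trivial here):

    import sys

    _MAXINT = sys.maxsize          # stand-in for the signed machine-word bounds
    _MININT = -sys.maxsize - 1     # that ovfcheck() enforces in RPython

    def _ovfcheck(value):
        # raise if the result would not fit in a signed machine word
        if value > _MAXINT or value < _MININT:
            raise OverflowError
        return value

    def long_add(x, y):
        # stand-in for the ovf2long fallback: in PyPy this would build a
        # W_LongObject or W_SmallLongObject from the operands
        return x + y

    def int_add(x, y):
        # shape of the new W_IntObject.descr_add: fast fixed-width path
        # first, promote to the long implementation only on overflow
        try:
            return _ovfcheck(x + y)
        except OverflowError:
            return long_add(x, y)

For example, int_add(sys.maxsize, 1) takes the fallback branch, while small operands never leave the fast path.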
descr_sub, descr_rsub = _make_generic_descr_binop('sub') + descr_mul, descr_rmul = _make_generic_descr_binop('mul') + + descr_and, descr_rand = _make_generic_descr_binop('and', ovf=False) + descr_or, descr_ror = _make_generic_descr_binop('or', ovf=False) + descr_xor, descr_rxor = _make_generic_descr_binop('xor', ovf=False) + + def _make_descr_binop(func, ovf=True, ovf2small=None): + opname = func.__name__[1:] + if ovf: + ovf2long = _make_ovf2long(opname, ovf2small) + + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + if not isinstance(w_other, W_IntObject): + return space.w_NotImplemented + + x = self.intval + y = w_other.intval + if ovf: + try: + return func(space, x, y) + except OverflowError: + return ovf2long(space, x, y) + else: + return func(space, x, y) + + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + if not isinstance(w_other, W_IntObject): + return space.w_NotImplemented + + x = self.intval + y = w_other.intval + if ovf: + try: + return func(space, y, x) + except OverflowError: + return ovf2long(space, y, x) + else: + return func(space, y, x) + + return descr_binop, descr_rbinop + + descr_lshift, descr_rlshift = _make_descr_binop( + _lshift, ovf2small=_lshift_ovf2small) + descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False) + + descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) + descr_div, descr_rdiv = _make_descr_binop(_div) + descr_truediv, descr_rtruediv = _make_descr_binop(_truediv, ovf=False) + descr_mod, descr_rmod = _make_descr_binop(_mod) + descr_divmod, descr_rdivmod = _make_descr_binop( + _divmod, ovf2small=_divmod_ovf2small) + + +def wrapint(space, x): + if not space.config.objspace.std.withprebuiltint: + return W_IntObject(x) + lower = space.config.objspace.std.prebuiltintfrom + upper = space.config.objspace.std.prebuiltintto + # use r_uint to perform a single comparison (this whole function is + # getting inlined into every caller so keeping the branching to a + # minimum is a good idea) + index = r_uint(x - lower) + if index >= r_uint(upper - lower): + w_res = instantiate(W_IntObject) + else: + w_res = W_IntObject.PREBUILT[index] + # obscure hack to help the CPU cache: we store 'x' even into a + # prebuilt integer's intval. This makes sure that the intval field + # is present in the cache in the common case where it is quickly + # reused. 
(we could use a prefetch hint if we had that) + w_res.intval = x + return w_res + + +def wrap_parsestringerror(space, e, w_source): + if isinstance(e, InvalidBaseError): + w_msg = space.wrap(e.msg) + else: + w_msg = space.wrap('%s: %s' % (e.msg, + space.str_w(space.repr(w_source)))) + return OperationError(space.w_ValueError, w_msg) + + +def _recover_with_smalllong(space): + """True if there is a chance that a SmallLong would fit when an Int + does not + """ + return (space.config.objspace.std.withsmalllong and + sys.maxint == 2147483647) + + + at jit.elidable +def _string_to_int_or_long(space, w_source, string, base=10): + w_longval = None + value = 0 try: - z = ovfcheck(x + y) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer addition")) - return space.newint(z) + value = string_to_int(string, base) + except ParseStringError as e: + raise wrap_parsestringerror(space, e, w_source) + except ParseStringOverflowError as e: + w_longval = _retry_to_w_long(space, e.parser, w_source) + return value, w_longval -def sub__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval + +def _retry_to_w_long(space, parser, w_source): + parser.rewind() try: - z = ovfcheck(x - y) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer substraction")) - return space.newint(z) + bigint = rbigint._from_numberstring_parser(parser) + except ParseStringError as e: + raise wrap_parsestringerror(space, e, w_source) + return space.newlong_from_rbigint(bigint) -def mul__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = ovfcheck(x * y) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer multiplication")) - return space.newint(z) -def floordiv__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = ovfcheck(x // y) - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer division by zero")) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer division")) - return space.newint(z) -div__Int_Int = floordiv__Int_Int +def _new_int(space, w_inttype, w_x, w_base=None): + w_longval = None + w_value = w_x # 'x' is the keyword argument name in CPython + value = 0 + if w_base is None: + # check for easy cases + if type(w_value) is W_IntObject: + value = w_value.intval + elif (space.lookup(w_value, '__int__') is not None or + space.lookup(w_value, '__trunc__') is not None): + # otherwise, use the __int__() or the __trunc__() methods + w_obj = w_value + if space.lookup(w_obj, '__int__') is None: + w_obj = space.trunc(w_obj) + w_obj = space.int(w_obj) + # 'int(x)' should return what x.__int__() returned, which should + # be an int or long or a subclass thereof. 
+ if space.is_w(w_inttype, space.w_int): + return w_obj + # int_w is effectively what we want in this case, + # we cannot construct a subclass of int instance with an + # an overflowing long + value = space.int_w(w_obj) + elif space.isinstance_w(w_value, space.w_str): + value, w_longval = _string_to_int_or_long(space, w_value, + space.str_w(w_value)) + elif space.isinstance_w(w_value, space.w_unicode): + from pypy.objspace.std.unicodeobject import unicode_to_decimal_w + string = unicode_to_decimal_w(space, w_value) + value, w_longval = _string_to_int_or_long(space, w_value, string) + else: + # If object supports the buffer interface + try: + w_buffer = space.buffer(w_value) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + raise oefmt(space.w_TypeError, + "int() argument must be a string or a number, " + "not '%T'", w_value) + else: + buf = space.interp_w(Buffer, w_buffer) + value, w_longval = _string_to_int_or_long(space, w_value, + buf.as_str()) + ok = True + else: + base = space.int_w(w_base) -def truediv__Int_Int(space, w_int1, w_int2): - x = float(w_int1.intval) - y = float(w_int2.intval) - if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, - space.wrap("float division")) - return space.wrap(x / y) + if space.isinstance_w(w_value, space.w_unicode): + from pypy.objspace.std.unicodeobject import unicode_to_decimal_w + s = unicode_to_decimal_w(space, w_value) + else: + try: + s = space.str_w(w_value) + except OperationError as e: + raise oefmt(space.w_TypeError, + "int() can't convert non-string with explicit " + "base") -def mod__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = ovfcheck(x % y) - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer modulo by zero")) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer modulo")) - return space.newint(z) + value, w_longval = _string_to_int_or_long(space, w_value, s, base) -def divmod__Int_Int(space, w_int1, w_int2): - x = w_int1.intval - y = w_int2.intval - try: - z = ovfcheck(x // y) - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("integer divmod by zero")) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer modulo")) - # no overflow possible - m = x % y - w = space.wrap - return space.newtuple([w(z), w(m)]) + if w_longval is not None: + if not space.is_w(w_inttype, space.w_int): + raise oefmt(space.w_OverflowError, + "long int too large to convert to int") + return w_longval + elif space.is_w(w_inttype, space.w_int): + # common case + return wrapint(space, value) + else: + w_obj = space.allocate_instance(W_IntObject, w_inttype) + W_IntObject.__init__(w_obj, value) + return w_obj -# helper for pow() - at jit.look_inside_iff(lambda space, iv, iw, iz: - jit.isconstant(iw) and jit.isconstant(iz)) -def _impl_int_int_pow(space, iv, iw, iz): - if iw < 0: - if iz != 0: - raise OperationError(space.w_TypeError, - space.wrap("pow() 2nd argument " - "cannot be negative when 3rd argument specified")) - ## bounce it, since it always returns float - raise FailedToImplementArgs(space.w_ValueError, - space.wrap("integer exponentiation")) - temp = iv - ix = 1 - try: - while iw > 0: - if iw & 1: - ix = ovfcheck(ix*temp) - iw >>= 1 #/* Shift exponent down by 1 bit */ - if iw==0: - break - temp = ovfcheck(temp*temp) #/* Square the value of temp */ - if iz: - #/* If we did a multiplication, 
perform a modulo */ - ix = ix % iz; - temp = temp % iz; - if iz: - ix = ix % iz - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer exponentiation")) - return ix +W_IntObject.typedef = StdTypeDef("int", + __doc__ = """int(x=0) -> int or long +int(x, base=10) -> int or long -def pow__Int_Int_Int(space, w_int1, w_int2, w_int3): - x = w_int1.intval - y = w_int2.intval - z = w_int3.intval - if z == 0: - raise OperationError(space.w_ValueError, - space.wrap("pow() 3rd argument cannot be 0")) - return space.wrap(_impl_int_int_pow(space, x, y, z)) +Convert a number or string to an integer, or return 0 if no arguments +are given. If x is floating point, the conversion truncates towards zero. +If x is outside the integer range, the function returns a long instead. -def pow__Int_Int_None(space, w_int1, w_int2, w_int3): - x = w_int1.intval - y = w_int2.intval - return space.wrap(_impl_int_int_pow(space, x, y, 0)) +If x is not a number or if base is given, then x must be a string or +Unicode object representing an integer literal in the given base. The +literal can be preceded by '+' or '-' and be surrounded by whitespace. +The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to +interpret the base from the string as an integer literal. +>>> int('0b100', base=0) +4""", + __new__ = interp2app(W_IntObject.descr_new), -def neg__Int(space, w_int1): - a = w_int1.intval - try: - x = ovfcheck(-a) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer negation")) - return space.newint(x) -get_negint = neg__Int + numerator = typedef.GetSetProperty( + W_IntObject.descr_get_numerator, + doc="the numerator of a rational number in lowest terms"), + denominator = typedef.GetSetProperty( + W_IntObject.descr_get_denominator, + doc="the denominator of a rational number in lowest terms"), + real = typedef.GetSetProperty( + W_IntObject.descr_get_real, + doc="the real part of a complex number"), + imag = typedef.GetSetProperty( + W_IntObject.descr_get_imag, + doc="the imaginary part of a complex number"), + __repr__ = interp2app(W_IntObject.descr_repr, + doc=W_AbstractIntObject.descr_repr.__doc__), + __str__ = interp2app(W_IntObject.descr_str, + doc=W_AbstractIntObject.descr_str.__doc__), -def abs__Int(space, w_int1): - if w_int1.intval >= 0: - return w_int1.int(space) - else: - return get_negint(space, w_int1) + conjugate = interp2app(W_IntObject.descr_conjugate, + doc=W_AbstractIntObject.descr_conjugate.__doc__), + bit_length = interp2app(W_IntObject.descr_bit_length, + doc=W_AbstractIntObject.descr_bit_length.__doc__), + __format__ = interp2app(W_IntObject.descr_format, + doc=W_AbstractIntObject.descr_format.__doc__), + __hash__ = interp2app(W_IntObject.descr_hash, + doc=W_AbstractIntObject.descr_hash.__doc__), + __coerce__ = interp2app(W_IntObject.descr_coerce, + doc=W_AbstractIntObject.descr_coerce.__doc__), + __oct__ = interp2app(W_IntObject.descr_oct, + doc=W_AbstractIntObject.descr_oct.__doc__), + __hex__ = interp2app(W_IntObject.descr_hex, + doc=W_AbstractIntObject.descr_hex.__doc__), + __getnewargs__ = interp2app( + W_IntObject.descr_getnewargs, + doc=W_AbstractIntObject.descr_getnewargs.__doc__), -def nonzero__Int(space, w_int1): - return space.newbool(w_int1.intval != 0) + __int__ = interp2app(W_IntObject.int, + doc=W_AbstractIntObject.int.__doc__), + __long__ = interp2app(W_IntObject.descr_long, + doc=W_AbstractIntObject.descr_long.__doc__), + __index__ = interp2app(W_IntObject.descr_index, + 
doc=W_AbstractIntObject.descr_index.__doc__), + __trunc__ = interp2app(W_IntObject.descr_trunc, + doc=W_AbstractIntObject.descr_trunc.__doc__), + __float__ = interp2app(W_IntObject.descr_float, + doc=W_AbstractIntObject.descr_float.__doc__), -def invert__Int(space, w_int1): - x = w_int1.intval - a = ~x - return space.newint(a) + __pos__ = interp2app(W_IntObject.descr_pos, + doc=W_AbstractIntObject.descr_pos.__doc__), + __neg__ = interp2app(W_IntObject.descr_neg, + doc=W_AbstractIntObject.descr_neg.__doc__), + __abs__ = interp2app(W_IntObject.descr_abs, + doc=W_AbstractIntObject.descr_abs.__doc__), + __nonzero__ = interp2app(W_IntObject.descr_nonzero, + doc=W_AbstractIntObject.descr_nonzero.__doc__), + __invert__ = interp2app(W_IntObject.descr_invert, + doc=W_AbstractIntObject.descr_invert.__doc__), -def lshift__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - if r_uint(b) < LONG_BIT: # 0 <= b < LONG_BIT - try: - c = ovfcheck(a << b) - except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer left shift")) - return space.newint(c) - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - else: #b >= LONG_BIT - if a == 0: - return w_int1.int(space) - raise FailedToImplementArgs(space.w_OverflowError, - space.wrap("integer left shift")) + __lt__ = interp2app(W_IntObject.descr_lt, + doc=W_AbstractIntObject.descr_lt.__doc__), + __le__ = interp2app(W_IntObject.descr_le, + doc=W_AbstractIntObject.descr_le.__doc__), + __eq__ = interp2app(W_IntObject.descr_eq, + doc=W_AbstractIntObject.descr_eq.__doc__), + __ne__ = interp2app(W_IntObject.descr_ne, + doc=W_AbstractIntObject.descr_ne.__doc__), + __gt__ = interp2app(W_IntObject.descr_gt, + doc=W_AbstractIntObject.descr_gt.__doc__), + __ge__ = interp2app(W_IntObject.descr_ge, + doc=W_AbstractIntObject.descr_ge.__doc__), -def rshift__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) - if b < 0: - raise OperationError(space.w_ValueError, - space.wrap("negative shift count")) - else: # b >= LONG_BIT - if a == 0: - return w_int1.int(space) - if a < 0: - a = -1 - else: - a = 0 - else: - a = a >> b - return space.newint(a) + __add__ = interp2app(W_IntObject.descr_add, + doc=W_AbstractIntObject.descr_add.__doc__), + __radd__ = interp2app(W_IntObject.descr_radd, + doc=W_AbstractIntObject.descr_radd.__doc__), + __sub__ = interp2app(W_IntObject.descr_sub, + doc=W_AbstractIntObject.descr_sub.__doc__), + __rsub__ = interp2app(W_IntObject.descr_rsub, + doc=W_AbstractIntObject.descr_rsub.__doc__), + __mul__ = interp2app(W_IntObject.descr_mul, + doc=W_AbstractIntObject.descr_mul.__doc__), + __rmul__ = interp2app(W_IntObject.descr_rmul, + doc=W_AbstractIntObject.descr_rmul.__doc__), -def and__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - res = a & b - return space.newint(res) + __and__ = interp2app(W_IntObject.descr_and, + doc=W_AbstractIntObject.descr_and.__doc__), + __rand__ = interp2app(W_IntObject.descr_rand, + doc=W_AbstractIntObject.descr_rand.__doc__), + __or__ = interp2app(W_IntObject.descr_or, + doc=W_AbstractIntObject.descr_or.__doc__), + __ror__ = interp2app(W_IntObject.descr_ror, + doc=W_AbstractIntObject.descr_ror.__doc__), + __xor__ = interp2app(W_IntObject.descr_xor, + doc=W_AbstractIntObject.descr_xor.__doc__), + __rxor__ = interp2app(W_IntObject.descr_rxor, + doc=W_AbstractIntObject.descr_rxor.__doc__), -def xor__Int_Int(space, w_int1, w_int2): - a = 
w_int1.intval - b = w_int2.intval - res = a ^ b - return space.newint(res) + __lshift__ = interp2app(W_IntObject.descr_lshift, + doc=W_AbstractIntObject.descr_lshift.__doc__), + __rlshift__ = interp2app(W_IntObject.descr_rlshift, + doc=W_AbstractIntObject.descr_rlshift.__doc__), + __rshift__ = interp2app(W_IntObject.descr_rshift, + doc=W_AbstractIntObject.descr_rshift.__doc__), + __rrshift__ = interp2app(W_IntObject.descr_rrshift, + doc=W_AbstractIntObject.descr_rrshift.__doc__), -def or__Int_Int(space, w_int1, w_int2): - a = w_int1.intval - b = w_int2.intval - res = a | b - return space.newint(res) + __floordiv__ = interp2app(W_IntObject.descr_floordiv, + doc=W_AbstractIntObject.descr_floordiv.__doc__), + __rfloordiv__ = interp2app( + W_IntObject.descr_rfloordiv, + doc=W_AbstractIntObject.descr_rfloordiv.__doc__), + __div__ = interp2app(W_IntObject.descr_div, + doc=W_AbstractIntObject.descr_div.__doc__), + __rdiv__ = interp2app(W_IntObject.descr_rdiv, + doc=W_AbstractIntObject.descr_rdiv.__doc__), + __truediv__ = interp2app(W_IntObject.descr_truediv, + doc=W_AbstractIntObject.descr_truediv.__doc__), + __rtruediv__ = interp2app(W_IntObject.descr_rtruediv, + doc=W_AbstractIntObject.descr_rtruediv.__doc__), + __mod__ = interp2app(W_IntObject.descr_mod, + doc=W_AbstractIntObject.descr_mod.__doc__), + __rmod__ = interp2app(W_IntObject.descr_rmod, + doc=W_AbstractIntObject.descr_rmod.__doc__), + __divmod__ = interp2app(W_IntObject.descr_divmod, + doc=W_AbstractIntObject.descr_divmod.__doc__), + __rdivmod__ = interp2app(W_IntObject.descr_rdivmod, + doc=W_AbstractIntObject.descr_rdivmod.__doc__), -def pos__Int(self, space): - return self.int(space) -trunc__Int = pos__Int - -def index__Int(space, w_int1): - return w_int1.int(space) - -def float__Int(space, w_int1): - a = w_int1.intval - x = float(a) - return space.newfloat(x) - -def getnewargs__Int(space, w_int1): - return space.newtuple([space.newint(w_int1.intval)]) - - -register_all(vars()) + __pow__ = interp2app(W_IntObject.descr_pow, + doc=W_AbstractIntObject.descr_pow.__doc__), + __rpow__ = interp2app(W_IntObject.descr_rpow, + doc=W_AbstractIntObject.descr_rpow.__doc__), +) diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py deleted file mode 100644 --- a/pypy/objspace/std/inttype.py +++ /dev/null @@ -1,230 +0,0 @@ -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault,\ - interpindirect2app -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.stdtypedef import StdTypeDef, SMM -from pypy.objspace.std.model import W_Object -from rpython.rlib.rarithmetic import r_uint, string_to_int -from rpython.rlib.objectmodel import instantiate -from rpython.rlib.rbigint import rbigint -from rpython.rlib.rstring import ( - InvalidBaseError, ParseStringError, ParseStringOverflowError) -from rpython.rlib import jit - -# ____________________________________________________________ - -## def descr_conjugate(space, w_int): -## "Returns self, the complex conjugate of any int." -## return space.int(w_int) - -## def descr_bit_length(space, w_int): -## """int.bit_length() -> int - -## Number of bits necessary to represent self in binary. 
-## >>> bin(37) -## '0b100101' -## >>> (37).bit_length() -## 6 -## """ -## val = space.int_w(w_int) -## if val < 0: -## val = -val -## bits = 0 -## while val: -## bits += 1 -## val >>= 1 -## return space.wrap(bits) - -## @gateway.unwrap_spec(s='bufferstr', byteorder=str) -## def descr_from_bytes(space, w_cls, s, byteorder): -## from pypy.objspace.std.longtype import descr_from_bytes -## return descr_from_bytes(space, space.w_int, s, byteorder) - -def wrapint(space, x): - return space.newlong(x) - -## def wrapint(space, x): -## if space.config.objspace.std.withprebuiltint: -## from pypy.objspace.std.intobject import W_IntObject -## lower = space.config.objspace.std.prebuiltintfrom -## upper = space.config.objspace.std.prebuiltintto -## # use r_uint to perform a single comparison (this whole function -## # is getting inlined into every caller so keeping the branching -## # to a minimum is a good idea) -## index = r_uint(x - lower) -## if index >= r_uint(upper - lower): -## w_res = instantiate(W_IntObject) -## else: -## w_res = W_IntObject.PREBUILT[index] -## # obscure hack to help the CPU cache: we store 'x' even into -## # a prebuilt integer's intval. This makes sure that the intval -## # field is present in the cache in the common case where it is -## # quickly reused. (we could use a prefetch hint if we had that) -## w_res.intval = x -## return w_res -## else: -## from pypy.objspace.std.intobject import W_IntObject -## return W_IntObject(x) - -# ____________________________________________________________ - -## @jit.elidable -## def string_to_int_or_long(space, w_source, string, base=10): -## w_longval = None -## value = 0 -## try: -## value = string_to_int(string, base) -## except ParseStringError as e: -## raise wrap_parsestringerror(space, e, w_source) -## except ParseStringOverflowError, e: -## w_longval = retry_to_w_long(space, e.parser, w_source) -## return value, w_longval - -## def retry_to_w_long(space, parser, w_source): -## parser.rewind() -## try: -## bigint = rbigint._from_numberstring_parser(parser) -## except ParseStringError as e: -## raise wrap_parsestringerror(space, e, w_source) -## return space.newlong_from_rbigint(bigint) - -def wrap_parsestringerror(space, e, w_source): - if isinstance(e, InvalidBaseError): - w_msg = space.wrap(e.msg) - else: - w_msg = space.wrap(u'%s: %s' % (unicode(e.msg), - space.unicode_w(space.repr(w_source)))) - return OperationError(space.w_ValueError, w_msg) - -## @unwrap_spec(w_x = WrappedDefault(0)) -## def descr__new__(space, w_inttype, w_x, w_base=None): -## from pypy.objspace.std.intobject import W_IntObject -## w_longval = None -## w_value = w_x # 'x' is the keyword argument name in CPython -## value = 0 -## if w_base is None: -## # check for easy cases -## if type(w_value) is W_IntObject: -## value = w_value.intval -## elif space.lookup(w_value, '__int__') is not None or \ -## space.lookup(w_value, '__trunc__') is not None: -## # otherwise, use the __int__() or the __trunc__() methods -## w_obj = w_value -## if space.lookup(w_obj, '__int__') is None: -## w_obj = space.trunc(w_obj) -## w_obj = space.int(w_obj) -## # 'int(x)' should return what x.__int__() returned, which should -## # be an int or long or a subclass thereof. 
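
The parsing strategy in the (now commented-out and deleted) string_to_int_or_long/retry_to_w_long helpers above is: attempt a machine-width parse first, and only re-parse the digits as a bigint when that overflows. A rough plain-Python sketch of the idea, with sys.maxsize standing in for the RPython word-size bound (the names below are illustrative, not from the PyPy sources):

import sys

MACHINE_MAX = sys.maxsize            # stand-in for the RPython machine-int bound
MACHINE_MIN = -sys.maxsize - 1

def parse_int_or_long(string, base=10):
    value = int(string, base)        # plain Python already parses to arbitrary precision
    if MACHINE_MIN <= value <= MACHINE_MAX:
        return ('machine int', value)    # would become a W_IntObject
    return ('bigint', value)             # would be re-parsed into a long/bigint object

assert parse_int_or_long('42') == ('machine int', 42)
assert parse_int_or_long(str(2 ** 100))[0] == 'bigint'
assert parse_int_or_long('2a', base=16) == ('machine int', 42)
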
-## if space.is_w(w_inttype, space.w_int): -## return w_obj -## # int_w is effectively what we want in this case, -## # we cannot construct a subclass of int instance with an -## # an overflowing long -## value = space.int_w(w_obj) -## elif space.isinstance_w(w_value, space.w_str): -## value, w_longval = string_to_int_or_long(space, w_value, -## space.str_w(w_value)) -## elif space.isinstance_w(w_value, space.w_unicode): -## from pypy.objspace.std.unicodeobject import unicode_to_decimal_w -## string = unicode_to_decimal_w(space, w_value) -## value, w_longval = string_to_int_or_long(space, w_value, string) -## else: -## # If object supports the buffer interface -## try: -## w_buffer = space.buffer(w_value) -## except OperationError, e: -## if not e.match(space, space.w_TypeError): -## raise -## raise oefmt(space.w_TypeError, -## "int() argument must be a string or a number, not " -## "'%T'", w_value) -## else: -## buf = space.interp_w(Buffer, w_buffer) -## value, w_longval = string_to_int_or_long(space, w_value, -## buf.as_str()) -## else: -## base = space.int_w(w_base) - -## if space.isinstance_w(w_value, space.w_unicode): -## from pypy.objspace.std.unicodeobject import unicode_to_decimal_w -## s = unicode_to_decimal_w(space, w_value) -## else: -## try: -## s = space.str_w(w_value) -## except OperationError, e: -## raise OperationError(space.w_TypeError, -## space.wrap("int() can't convert non-string " -## "with explicit base")) - -## value, w_longval = string_to_int_or_long(space, w_value, s, base) - -## if w_longval is not None: -## if not space.is_w(w_inttype, space.w_int): -## raise OperationError(space.w_OverflowError, -## space.wrap( -## "long int too large to convert to int")) -## return w_longval -## elif space.is_w(w_inttype, space.w_int): -## # common case -## return wrapint(space, value) -## else: -## w_obj = space.allocate_instance(W_IntObject, w_inttype) -## W_IntObject.__init__(w_obj, value) -## return w_obj - -## def descr_get_numerator(space, w_obj): -## return space.int(w_obj) - -## def descr_get_denominator(space, w_obj): -## return space.wrap(1) - -## def descr_get_real(space, w_obj): -## return space.int(w_obj) - -## def descr_get_imag(space, w_obj): -## return space.wrap(0) - -# ____________________________________________________________ - -class W_AbstractIntObject(W_Object): - __slots__ = () - - def is_w(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): - return False - if self.user_overridden_class or w_other.user_overridden_class: - return self is w_other - return space.bigint_w(self).eq(space.bigint_w(w_other)) - - def immutable_unique_id(self, space): - if self.user_overridden_class: - return None - from pypy.objspace.std.model import IDTAG_INT as tag - b = space.bigint_w(self) - b = b.lshift(3).or_(rbigint.fromint(tag)) - return space.newlong_from_rbigint(b) - - def int(self, space): - raise NotImplementedError - -## int_typedef = StdTypeDef("int", -## __doc__ = '''int(x[, base]) -> integer - -## Convert a string or number to an integer, if possible. A floating point -## argument will be truncated towards zero (this does not include a string -## representation of a floating point number!) When converting a string, use -## the optional base. It is an error to supply a base when converting a -## non-string. 
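
W_AbstractIntObject.immutable_unique_id above derives an object id from the numeric value itself: the bigint is shifted left three bits and a small per-type tag is OR'ed in, so two boxes holding the same integer report the same id while different numeric types cannot collide. A minimal sketch of that scheme; the tag constants here are made up for the illustration:

IDTAG_INT = 1      # hypothetical tag values, for illustration only
IDTAG_FLOAT = 5

def tagged_id(value, tag):
    return (value << 3) | tag

assert tagged_id(42, IDTAG_INT) == tagged_id(42, IDTAG_INT)      # same value -> same id
assert tagged_id(42, IDTAG_INT) != tagged_id(42, IDTAG_FLOAT)    # the tag keeps types apart
assert tagged_id(42, IDTAG_INT) != tagged_id(43, IDTAG_INT)
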
If the argument is outside the integer range a long object -## will be returned instead.''', -## __new__ = interp2app(descr__new__), -## conjugate = interp2app(descr_conjugate), -## bit_length = interp2app(descr_bit_length), -## numerator = typedef.GetSetProperty(descr_get_numerator), -## denominator = typedef.GetSetProperty(descr_get_denominator), -## real = typedef.GetSetProperty(descr_get_real), -## imag = typedef.GetSetProperty(descr_get_imag), -## __int__ = interpindirect2app(W_AbstractIntObject.int), -## ) -## int_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -1,81 +1,269 @@ """The builtin long implementation""" -import sys +import functools -from rpython.rlib.rarithmetic import intmask -from rpython.rlib.rbigint import SHIFT, _widen_digit, rbigint +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rbigint import rbigint +from rpython.rlib.rstring import ParseStringError +from rpython.tool.sourcetools import func_renamer, func_with_new_name -from pypy.interpreter.error import OperationError -from pypy.objspace.std import model, newformat -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.longtype import W_AbstractLongObject, long_typedef -from pypy.objspace.std.model import W_Object, registerimplementation -from pypy.objspace.std.multimethod import FailedToImplementArgs -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.register_all import register_all +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.buffer import Buffer +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import ( + WrappedDefault, interp2app, interpindirect2app, unwrap_spec) +from pypy.objspace.std import newformat +from pypy.objspace.std.intobject import W_AbstractIntObject +from pypy.objspace.std.model import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG) +from pypy.objspace.std.stdtypedef import StdTypeDef + HASH_BITS = 61 if sys.maxsize > 2 ** 31 - 1 else 31 HASH_MODULUS = 2 ** HASH_BITS - 1 +def delegate_other(func): + @functools.wraps(func) + def delegated(self, space, w_other): + if isinstance(w_other, W_AbstractIntObject): + w_other = w_other.descr_long(space) + elif not isinstance(w_other, W_AbstractLongObject): + return space.w_NotImplemented + return func(self, space, w_other) + return delegated + + +class W_AbstractLongObject(W_Root): + + __slots__ = () + + def is_w(self, space, w_other): + if not isinstance(w_other, W_AbstractLongObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + return space.bigint_w(self).eq(space.bigint_w(w_other)) + + def immutable_unique_id(self, space): + if self.user_overridden_class: + return None + b = space.bigint_w(self) + b = b.lshift(3).or_(rbigint.fromint(IDTAG_LONG)) + return space.newlong_from_rbigint(b) + + def unwrap(self, space): + return self.longval() + + def int(self, space): + raise NotImplementedError + + def asbigint(self): + raise NotImplementedError + + def descr_getnewargs(self, space): + return space.newtuple([newlong(space, self.asbigint())]) + + def descr_conjugate(self, space): + """Returns self, the complex conjugate of any long.""" + return space.long(self) + + def descr_bit_length(self, space): + """long.bit_length() -> int or long + + Number of bits necessary to 
represent self in binary. + >>> bin(37L) + '0b100101' + >>> (37L).bit_length() + 6 + """ + bigint = space.bigint_w(self) + try: + return space.wrap(bigint.bit_length()) + except OverflowError: + raise oefmt(space.w_OverflowError, "too many digits in integer") + + def _truediv(self, space, w_other): + try: + f = self.asbigint().truediv(w_other.asbigint()) + except ZeroDivisionError: + raise oefmt(space.w_ZeroDivisionError, + "long division or modulo by zero") From noreply at buildbot.pypy.org Tue Mar 4 05:29:32 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Mar 2014 05:29:32 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge upstream Message-ID: <20140304042932.A3E841C3427@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69653:dd1df5603029 Date: 2014-03-03 20:24 -0800 http://bitbucket.org/pypy/pypy/changeset/dd1df5603029/ Log: merge upstream diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -1,6 +1,6 @@ from __future__ import with_statement import py -import sys +import sys, os from pypy.module.thread.test.support import GenericTestThread from rpython.translator.c.test.test_genc import compile @@ -150,7 +150,7 @@ class AppTestLockSignals(GenericTestThread): - pytestmark = py.test.mark.skipif("sys.platform != 'posix'") + pytestmark = py.test.mark.skipif("os.name != 'posix'") def setup_class(cls): cls.w_using_pthread_cond = cls.space.wrap(sys.platform == 'freebsd6') diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -10,6 +10,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.nonconst import NonConstant from rpython.rlib.rarithmetic import intmask @@ -675,14 +676,20 @@ return m def alloc_hinted(hintp, map_size): - flags = NonConstant(MAP_PRIVATE | MAP_ANONYMOUS) - prot = NonConstant(PROT_EXEC | PROT_READ | PROT_WRITE) + flags = MAP_PRIVATE | MAP_ANONYMOUS + prot = PROT_EXEC | PROT_READ | PROT_WRITE + if we_are_translated(): + flags = NonConstant(flags) + prot = NonConstant(prot) return c_mmap_safe(hintp, map_size, prot, flags, -1, 0) def clear_large_memory_chunk_aligned(addr, map_size): addr = rffi.cast(PTR, addr) - flags = NonConstant(MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS) - prot = NonConstant(PROT_READ | PROT_WRITE) + flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS + prot = PROT_READ | PROT_WRITE + if we_are_translated(): + flags = NonConstant(flags) + prot = NonConstant(prot) res = c_mmap_safe(addr, map_size, prot, flags, -1, 0) return res == addr From noreply at buildbot.pypy.org Tue Mar 4 05:29:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Mar 2014 05:29:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: reapply py3k changes and adapt to remove-intlong-smm. restores usage of Message-ID: <20140304042933.E4F661C3427@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69654:83cd0eaa978e Date: 2014-03-03 20:26 -0800 http://bitbucket.org/pypy/pypy/changeset/83cd0eaa978e/ Log: reapply py3k changes and adapt to remove-intlong-smm. restores usage of W_IntObject! 
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floattype import float_typedef -from pypy.objspace.std.longtype import long_typedef from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.complextype import complex_typedef diff --git a/pypy/module/sys/system.py b/pypy/module/sys/system.py --- a/pypy/module/sys/system.py +++ b/pypy/module/sys/system.py @@ -1,7 +1,7 @@ """Information about the current system.""" from pypy.objspace.std.complexobject import HASH_IMAG from pypy.objspace.std.floatobject import HASH_INF, HASH_NAN -from pypy.objspace.std.longobject import HASH_MODULUS +from pypy.objspace.std.intobject import HASH_MODULUS from pypy.interpreter import gateway from rpython.rlib import rbigint, rfloat from rpython.rtyper.lltypesystem import lltype, rffi diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -48,7 +48,7 @@ return space.wrap('True' if self.intval else 'False') descr_str = func_with_new_name(descr_repr, 'descr_str') - def descr_nonzero(self, space): + def descr_bool(self, space): return self def _make_bitwise_binop(opname): @@ -91,8 +91,8 @@ doc=W_AbstractIntObject.descr_repr.__doc__), __str__ = interp2app(W_BoolObject.descr_str, doc=W_AbstractIntObject.descr_str.__doc__), - __nonzero__ = interp2app(W_BoolObject.descr_nonzero, - doc=W_AbstractIntObject.descr_nonzero.__doc__), + __bool__ = interp2app(W_BoolObject.descr_bool, + doc=W_AbstractIntObject.descr_bool.__doc__), __and__ = interp2app(W_BoolObject.descr_and, doc=W_AbstractIntObject.descr_and.__doc__), diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -3,12 +3,12 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.objspace.std import model, newformat from pypy.objspace.std.floattype import float_typedef, W_AbstractFloatObject +from pypy.objspace.std.intobject import HASH_BITS, HASH_MODULUS from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.longobject import ( - HASH_BITS, HASH_MODULUS, W_LongObject, newlong_from_float) +from pypy.objspace.std.longobject import W_LongObject, newlong_from_float from rpython.rlib.rarithmetic import ( LONG_BIT, intmask, ovfcheck_float_to_int, r_uint) from rpython.rlib.rfloat import ( diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1,4 +1,5 @@ -"""The builtin int implementation +"""The builtin int type (W_AbstractInt) and the base impl (W_IntObject) +based on rpython ints. 
In order to have the same behavior running on CPython, and after RPython translation this module uses rarithmetic.ovfcheck to explicitly check @@ -11,8 +12,10 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import instantiate, import_from_mixin, specialize from rpython.rlib.rarithmetic import ( - LONG_BIT, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) -from rpython.rlib.rbigint import rbigint + LONG_BIT, intmask, is_valid_int, ovfcheck, r_longlong, r_uint, + string_to_int) +from rpython.rlib.rbigint import ( + InvalidEndiannessError, InvalidSignednessError, rbigint) from rpython.rlib.rstring import ( InvalidBaseError, ParseStringError, ParseStringOverflowError) from rpython.tool.sourcetools import func_renamer, func_with_new_name @@ -21,7 +24,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.gateway import ( + WrappedDefault, applevel, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat from pypy.objspace.std.model import ( BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) @@ -30,6 +34,9 @@ SENTINEL = object() +HASH_BITS = 61 if sys.maxsize > 2 ** 31 - 1 else 31 +HASH_MODULUS = 2 ** HASH_BITS - 1 + class W_AbstractIntObject(W_Root): @@ -42,7 +49,7 @@ return False if self.user_overridden_class or w_other.user_overridden_class: return self is w_other - return space.int_w(self) == space.int_w(w_other) + return space.bigint_w(self).eq(space.bigint_w(w_other)) def immutable_unique_id(self, space): if self.user_overridden_class: @@ -51,10 +58,141 @@ b = b.lshift(3).or_(rbigint.fromint(IDTAG_INT)) return space.newlong_from_rbigint(b) + @staticmethod + @unwrap_spec(byteorder=str, signed=bool) + def descr_from_bytes(space, w_inttype, w_obj, byteorder, signed=False): + """int.from_bytes(bytes, byteorder, *, signed=False) -> int + + Return the integer represented by the given array of bytes. + + The bytes argument must either support the buffer protocol or be + an iterable object producing bytes. Bytes and bytearray are + examples of built-in objects that support the buffer protocol. + + The byteorder argument determines the byte order used to + represent the integer. If byteorder is 'big', the most + significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end + of the byte array. To request the native byte order of the host + system, use `sys.byteorder' as the byte order value. + + The signed keyword-only argument indicates whether two's + complement is used to represent the integer. 
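
descr_from_bytes above (and the matching descr_to_bytes that follows) mirror the CPython 3 int.from_bytes/int.to_bytes API. A quick round-trip check of the semantics being implemented, i.e. byte-order selection plus optional two's-complement interpretation, runnable on any CPython 3:

assert int.from_bytes(b'\x04\x00', 'little') == 4
assert int.from_bytes(b'\x04\x00', 'big') == 1024
assert (1024).to_bytes(2, 'big') == b'\x04\x00'
assert int.from_bytes(b'\xff', 'big', signed=True) == -1
assert int.from_bytes(b'\xff', 'big', signed=False) == 255
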
+ """ + from pypy.objspace.std.bytesobject import makebytesdata_w + bytes = ''.join(makebytesdata_w(space, w_obj)) + try: + bigint = rbigint.frombytes(bytes, byteorder=byteorder, + signed=signed) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, + "byteorder must be either 'little' or 'big'") + try: + as_int = bigint.toint() + except OverflowError: + from pypy.objspace.std.longobject import newbigint + return newbigint(space, w_inttype, bigint) + else: + if space.is_w(w_inttype, space.w_int): + # common case + return wrapint(space, as_int) + w_obj = space.allocate_instance(W_IntObject, w_inttype) + W_IntObject.__init__(w_obj, as_int) + return w_obj + + @unwrap_spec(nbytes=int, byteorder=str, signed=bool) + def descr_to_bytes(self, space, nbytes, byteorder, signed=False): + """to_bytes(...) + int.to_bytes(length, byteorder, *, signed=False) -> bytes + + Return an array of bytes representing an integer. + + The integer is represented using length bytes. An OverflowError + is raised if the integer is not representable with the given + number of bytes. + + The byteorder argument determines the byte order used to + represent the integer. If byteorder is 'big', the most + significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end + of the byte array. To request the native byte order of the host + system, use `sys.byteorder' as the byte order value. + + The signed keyword-only argument determines whether two's + complement is used to represent the integer. If signed is False + and a negative integer is given, an OverflowError is raised. + """ + bigint = space.bigint_w(self) + try: + byte_string = bigint.tobytes(nbytes, byteorder=byteorder, + signed=signed) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, + "byteorder must be either 'little' or 'big'") + except InvalidSignednessError: + raise oefmt(space.w_OverflowError, + "can't convert negative int to unsigned") + except OverflowError: + raise oefmt(space.w_OverflowError, "int too big to convert") + return space.wrapbytes(byte_string) + + def descr_round(self, space, w_ndigits=None): + """Rounding an Integral returns itself. + Rounding with an ndigits argument also returns an integer. + """ + # To round an integer m to the nearest 10**n (n positive), we + # make use of the divmod_near operation, defined by: + # + # divmod_near(a, b) = (q, r) + # + # where q is the nearest integer to the quotient a / b (the + # nearest even integer in the case of a tie) and r == a - q * b. + # Hence q * b = a - r is the nearest multiple of b to a, + # preferring even multiples in the case of a tie. + # + # So the nearest multiple of 10**n to m is: + # + # m - divmod_near(m, 10**n)[1] + + # XXX: since divmod_near is pure python we can probably remove + # the longs used here. 
or this could at least likely be more + # efficient for W_IntObject + from pypy.objspace.std.longobject import newlong + + if w_ndigits is None: + return self.int(space) + + ndigits = space.bigint_w(space.index(w_ndigits)) + # if ndigits >= 0 then no rounding is necessary; return self + # unchanged + if ndigits.ge(rbigint.fromint(0)): + return self.int(space) + + # result = self - divmod_near(self, 10 ** -ndigits)[1] + right = rbigint.fromint(10).pow(ndigits.neg()) + w_tuple = divmod_near(space, self, newlong(space, right)) + _, w_r = space.fixedview(w_tuple, 2) + return space.sub(self, w_r) + + def _int(self, space): + return self.int(space) + + descr_get_numerator = func_with_new_name(_int, 'descr_get_numerator') + descr_get_real = func_with_new_name(_int, 'descr_get_real') + + def descr_get_denominator(self, space): + return wrapint(space, 1) + + def descr_get_imag(self, space): + return wrapint(space, 0) + def int(self, space): """x.__int__() <==> int(x)""" raise NotImplementedError + def asbigint(self): + raise NotImplementedError + def descr_format(self, space, w_format_spec): raise NotImplementedError @@ -76,7 +214,6 @@ descr_repr = _abstract_unaryop('repr') descr_str = _abstract_unaryop('str') - descr_coerce = _abstract_unaryop('coerce') descr_conjugate = _abstract_unaryop( 'conjugate', "Returns self, the complex conjugate of any int.") descr_bit_length = _abstract_unaryop('bit_length', """\ @@ -88,11 +225,8 @@ >>> (37).bit_length() 6""") descr_hash = _abstract_unaryop('hash') - descr_oct = _abstract_unaryop('oct') - descr_hex = _abstract_unaryop('hex') descr_getnewargs = _abstract_unaryop('getnewargs', None) - descr_long = _abstract_unaryop('long') descr_index = _abstract_unaryop( 'index', "x[y:z] <==> x[y.__index__():z.__index__()]") descr_trunc = _abstract_unaryop('trunc', @@ -102,7 +236,7 @@ descr_pos = _abstract_unaryop('pos', "x.__pos__() <==> +x") descr_neg = _abstract_unaryop('neg', "x.__neg__() <==> -x") descr_abs = _abstract_unaryop('abs') - descr_nonzero = _abstract_unaryop('nonzero', "x.__nonzero__() <==> x != 0") + descr_bool = _abstract_unaryop('bool', "x.__bool__() <==> x != 0") descr_invert = _abstract_unaryop('invert', "x.__invert__() <==> ~x") def _abstract_cmpop(opname): @@ -156,7 +290,8 @@ try: z = ovfcheck(x // y) except ZeroDivisionError: - raise oefmt(space.w_ZeroDivisionError, "integer division by zero") + raise oefmt(space.w_ZeroDivisionError, + "integer division or modulo by zero") return wrapint(space, z) _div = func_with_new_name(_floordiv, '_div') @@ -309,6 +444,22 @@ """representation for debugging purposes""" return "%s(%d)" % (self.__class__.__name__, self.intval) + def is_w(self, space, w_other): + from pypy.objspace.std.boolobject import W_BoolObject + if (not isinstance(w_other, W_AbstractIntObject) or + isinstance(w_other, W_BoolObject)): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + x = self.intval + try: + y = space.int_w(w_other) + except OperationError as e: + if e.match(space, space.w_OverflowError): + return False + raise + return x == y + def int_w(self, space): return int(self.intval) unwrap = int_w @@ -321,7 +472,7 @@ return r_uint(intval) def bigint_w(self, space): - return rbigint.fromint(self.intval) + return self.asbigint() def float_w(self, space): return float(self.intval) @@ -333,6 +484,9 @@ return space.newint(self.intval) return W_Root.int(self, space) + def asbigint(self): + return rbigint.fromint(self.intval) + @staticmethod @unwrap_spec(w_x=WrappedDefault(0)) def 
descr_new(space, w_inttype, w_x, w_base=None): @@ -340,11 +494,21 @@ return _new_int(space, w_inttype, w_x, w_base) def descr_hash(self, space): - # unlike CPython, we don't special-case the value -1 in most of - # our hash functions, so there is not much sense special-casing - # it here either. Make sure this is consistent with the hash of - # floats and longs. - return self.int(space) + a = self.intval + sign = 1 + if a < 0: + sign = -1 + a = -a + + x = r_uint(a) + # efficient x % HASH_MODULUS: as HASH_MODULUS is a Mersenne + # prime + x = (x & HASH_MODULUS) + (x >> HASH_BITS) + if x >= HASH_MODULUS: + x -= HASH_MODULUS + + x = intmask(intmask(x) * sign) + return wrapint(space, -2 if x == -1 else x) def _int(self, space): return self.int(space) @@ -354,26 +518,12 @@ descr_trunc = func_with_new_name(_int, 'descr_trunc') descr_conjugate = func_with_new_name(_int, 'descr_conjugate') - descr_get_numerator = func_with_new_name(_int, 'descr_get_numerator') - descr_get_real = func_with_new_name(_int, 'descr_get_real') - - def descr_get_denominator(self, space): - return wrapint(space, 1) - - def descr_get_imag(self, space): - return wrapint(space, 0) - - def descr_coerce(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): - return space.w_NotImplemented - return space.newtuple([self, w_other]) - - def descr_long(self, space): + def as_w_long(self, space): # XXX: should try smalllong from pypy.objspace.std.longobject import W_LongObject return W_LongObject.fromint(space, self.intval) - def descr_nonzero(self, space): + def descr_bool(self, space): return space.newbool(self.intval != 0) def descr_invert(self, space): @@ -388,7 +538,7 @@ from pypy.objspace.std.smalllongobject import W_SmallLongObject x = r_longlong(a) return W_SmallLongObject(-x) - return self.descr_long(space).descr_neg(space) + return self.as_w_long(space).descr_neg(space) return wrapint(space, b) def descr_abs(self, space): @@ -400,12 +550,6 @@ x = float(a) return space.newfloat(x) - def descr_oct(self, space): - return space.wrap(oct(self.intval)) - - def descr_hex(self, space): - return space.wrap(hex(self.intval)) - def descr_getnewargs(self, space): return space.newtuple([wrapint(space, self.intval)]) @@ -431,7 +575,12 @@ @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_modulus=None): - if not isinstance(w_exponent, W_IntObject): + if isinstance(w_exponent, W_IntObject): + y = w_exponent.intval + elif isinstance(w_exponent, W_AbstractIntObject): + self = self.as_w_long(space) + return self.descr_pow(space, w_exponent, w_modulus) + else: return space.w_NotImplemented x = self.intval @@ -458,19 +607,26 @@ @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_rpow(self, space, w_base, w_modulus=None): - if not isinstance(w_base, W_IntObject): - return space.w_NotImplemented - return w_base.descr_pow(space, self, w_modulus) + if isinstance(w_base, W_IntObject): + return w_base.descr_pow(space, self, w_modulus) + elif isinstance(w_base, W_AbstractIntObject): + self = self.as_w_long(space) + return self.descr_rpow(space, self, w_modulus) + return space.w_NotImplemented def _make_descr_cmp(opname): op = getattr(operator, opname) - @func_renamer('descr_' + opname) + descr_name = 'descr_' + opname + @func_renamer(descr_name) def descr_cmp(self, space, w_other): - if not isinstance(w_other, W_IntObject): - return space.w_NotImplemented - i = self.intval - j = w_other.intval - return space.newbool(op(i, j)) + if isinstance(w_other, W_IntObject): + i = self.intval + j = 
w_other.intval + return space.newbool(op(i, j)) + elif isinstance(w_other, W_AbstractIntObject): + self = self.as_w_long(space) + return getattr(self, descr_name)(space, w_other) + return space.w_NotImplemented return descr_cmp descr_lt = _make_descr_cmp('lt') @@ -483,25 +639,27 @@ def _make_generic_descr_binop(opname, ovf=True): op = getattr(operator, opname + '_' if opname in ('and', 'or') else opname) - descr_rname = 'descr_r' + opname + descr_name, descr_rname = 'descr_' + opname, 'descr_r' + opname if ovf: ovf2long = _make_ovf2long(opname) - @func_renamer('descr_' + opname) + @func_renamer(descr_name) def descr_binop(self, space, w_other): - if not isinstance(w_other, W_IntObject): - return space.w_NotImplemented - - x = self.intval - y = w_other.intval - if ovf: - try: - z = ovfcheck(op(x, y)) - except OverflowError: - return ovf2long(space, x, y) - else: - z = op(x, y) - return wrapint(space, z) + if isinstance(w_other, W_IntObject): + x = self.intval + y = w_other.intval + if ovf: + try: + z = ovfcheck(op(x, y)) + except OverflowError: + return ovf2long(space, x, y) + else: + z = op(x, y) + return wrapint(space, z) + elif isinstance(w_other, W_AbstractIntObject): + self = self.as_w_long(space) + return getattr(self, descr_name)(space, w_other) + return space.w_NotImplemented if opname in COMMUTATIVE_OPS: @func_renamer(descr_rname) @@ -511,19 +669,21 @@ @func_renamer(descr_rname) def descr_rbinop(self, space, w_other): - if not isinstance(w_other, W_IntObject): - return space.w_NotImplemented - - x = self.intval - y = w_other.intval - if ovf: - try: - z = ovfcheck(op(y, x)) - except OverflowError: - return ovf2long(space, y, x) - else: - z = op(y, x) - return wrapint(space, z) + if isinstance(w_other, W_IntObject): + x = self.intval + y = w_other.intval + if ovf: + try: + z = ovfcheck(op(y, x)) + except OverflowError: + return ovf2long(space, y, x) + else: + z = op(y, x) + return wrapint(space, z) + elif isinstance(w_other, W_AbstractIntObject): + self = self.as_w_long(space) + return getattr(self, descr_rname)(space, w_other) + return space.w_NotImplemented return descr_binop, descr_rbinop @@ -537,38 +697,43 @@ def _make_descr_binop(func, ovf=True, ovf2small=None): opname = func.__name__[1:] + descr_name, descr_rname = 'descr_' + opname, 'descr_r' + opname if ovf: ovf2long = _make_ovf2long(opname, ovf2small) - @func_renamer('descr_' + opname) + @func_renamer(descr_name) def descr_binop(self, space, w_other): - if not isinstance(w_other, W_IntObject): - return space.w_NotImplemented + if isinstance(w_other, W_IntObject): + x = self.intval + y = w_other.intval + if ovf: + try: + return func(space, x, y) + except OverflowError: + return ovf2long(space, x, y) + else: + return func(space, x, y) + elif isinstance(w_other, W_AbstractIntObject): + self = self.as_w_long(space) + return getattr(self, descr_name)(space, w_other) + return space.w_NotImplemented - x = self.intval - y = w_other.intval - if ovf: - try: - return func(space, x, y) - except OverflowError: - return ovf2long(space, x, y) - else: - return func(space, x, y) - - @func_renamer('descr_r' + opname) + @func_renamer(descr_rname) def descr_rbinop(self, space, w_other): - if not isinstance(w_other, W_IntObject): - return space.w_NotImplemented - - x = self.intval - y = w_other.intval - if ovf: - try: + if isinstance(w_other, W_IntObject): + x = self.intval + y = w_other.intval + if ovf: + try: + return func(space, y, x) + except OverflowError: + return ovf2long(space, y, x) + else: return func(space, y, x) - except 
OverflowError: - return ovf2long(space, y, x) - else: - return func(space, y, x) + elif isinstance(w_other, W_AbstractIntObject): + self = self.as_w_long(space) + return getattr(self, descr_rname)(space, w_other) + return space.w_NotImplemented return descr_binop, descr_rbinop @@ -609,11 +774,30 @@ if isinstance(e, InvalidBaseError): w_msg = space.wrap(e.msg) else: - w_msg = space.wrap('%s: %s' % (e.msg, - space.str_w(space.repr(w_source)))) + w_msg = space.wrap(u'%s: %s' % (unicode(e.msg), + space.unicode_w(space.repr(w_source)))) return OperationError(space.w_ValueError, w_msg) +divmod_near = applevel(''' + def divmod_near(a, b): + """Return a pair (q, r) such that a = b * q + r, and abs(r) + <= abs(b)/2, with equality possible only if q is even. In + other words, q == a / b, rounded to the nearest integer using + round-half-to-even.""" + q, r = divmod(a, b) + # round up if either r / b > 0.5, or r / b == 0.5 and q is + # odd. The expression r / b > 0.5 is equivalent to 2 * r > b + # if b is positive, 2 * r < b if b negative. + greater_than_half = 2*r > b if b > 0 else 2*r < b + exactly_half = 2*r == b + if greater_than_half or exactly_half and q % 2 == 1: + q += 1 + r -= b + return q, r +''', filename=__file__).interphook('divmod_near') + + def _recover_with_smalllong(space): """True if there is a chance that a SmallLong would fit when an Int does not @@ -623,57 +807,71 @@ @jit.elidable -def _string_to_int_or_long(space, w_source, string, base=10): - w_longval = None - value = 0 +def _string_to_int_or_long(space, w_inttype, w_source, string, base=10): try: value = string_to_int(string, base) except ParseStringError as e: raise wrap_parsestringerror(space, e, w_source) except ParseStringOverflowError as e: - w_longval = _retry_to_w_long(space, e.parser, w_source) - return value, w_longval + return _retry_to_w_long(space, e.parser, w_inttype, w_source) + if space.is_w(w_inttype, space.w_int): + w_result = wrapint(space, value) + else: + w_result = space.allocate_instance(W_IntObject, w_inttype) + W_IntObject.__init__(w_result, value) + return w_result -def _retry_to_w_long(space, parser, w_source): + +def _retry_to_w_long(space, parser, w_inttype, w_source): + from pypy.objspace.std.longobject import newbigint parser.rewind() try: bigint = rbigint._from_numberstring_parser(parser) except ParseStringError as e: raise wrap_parsestringerror(space, e, w_source) - return space.newlong_from_rbigint(bigint) + return newbigint(space, w_inttype, bigint) def _new_int(space, w_inttype, w_x, w_base=None): + from pypy.objspace.std.longobject import W_LongObject, newbigint + if space.config.objspace.std.withsmalllong: + from pypy.objspace.std.smalllongobject import W_SmallLongObject + else: + W_SmallLongObject = None + w_longval = None w_value = w_x # 'x' is the keyword argument name in CPython value = 0 if w_base is None: # check for easy cases if type(w_value) is W_IntObject: + if space.is_w(w_inttype, space.w_int): + return w_value value = w_value.intval - elif (space.lookup(w_value, '__int__') is not None or - space.lookup(w_value, '__trunc__') is not None): - # otherwise, use the __int__() or the __trunc__() methods - w_obj = w_value - if space.lookup(w_obj, '__int__') is None: - w_obj = space.trunc(w_obj) - w_obj = space.int(w_obj) - # 'int(x)' should return what x.__int__() returned, which should - # be an int or long or a subclass thereof. 
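
The _make_generic_descr_binop/_make_descr_binop templates above all follow the same shape: perform the operation on machine-sized ints and fall back to the bigint path only when ovfcheck raises. A standalone sketch of that shape in plain Python; ovfcheck and the bounds here are stand-ins, since ordinary Python ints never overflow:

import operator
import sys

MAXINT, MININT = sys.maxsize, -sys.maxsize - 1   # stand-ins for the RPython word range

def ovfcheck(value):
    if not MININT <= value <= MAXINT:
        raise OverflowError
    return value

def add_with_fallback(x, y):
    try:
        return 'machine int', ovfcheck(operator.add(x, y))
    except OverflowError:
        return 'bigint', x + y       # the real code redoes the operation on rbigints

assert add_with_fallback(1, 2) == ('machine int', 3)
assert add_with_fallback(MAXINT, 1)[0] == 'bigint'
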
+ w_obj = space.allocate_instance(W_IntObject, w_inttype) + W_IntObject.__init__(w_obj, value) + return w_obj + elif type(w_value) is W_LongObject: if space.is_w(w_inttype, space.w_int): - return w_obj - # int_w is effectively what we want in this case, - # we cannot construct a subclass of int instance with an - # an overflowing long - value = space.int_w(w_obj) - elif space.isinstance_w(w_value, space.w_str): - value, w_longval = _string_to_int_or_long(space, w_value, - space.str_w(w_value)) + return w_value + return newbigint(space, w_inttype, w_value.num) + elif W_SmallLongObject and type(w_value) is W_SmallLongObject: + if space.is_w(w_inttype, space.w_int): + return w_value + return newbigint(space, w_inttype, space.bigint_w(w_value)) + elif space.lookup(w_value, '__int__') is not None: + return _from_intlike(space, w_inttype, w_value) + elif space.lookup(w_value, '__trunc__') is not None: + return _from_intlike(space, w_inttype, space.trunc(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w - string = unicode_to_decimal_w(space, w_value) - value, w_longval = _string_to_int_or_long(space, w_value, string) + return _string_to_int_or_long(space, w_inttype, w_value, + unicode_to_decimal_w(space, w_value)) + elif (space.isinstance_w(w_value, space.w_bytearray) or + space.isinstance_w(w_value, space.w_bytes)): + return _string_to_int_or_long(space, w_inttype, w_value, + space.bufferstr_w(w_value)) else: # If object supports the buffer interface try: @@ -686,186 +884,130 @@ "not '%T'", w_value) else: buf = space.interp_w(Buffer, w_buffer) - value, w_longval = _string_to_int_or_long(space, w_value, - buf.as_str()) - ok = True + return _string_to_int_or_long(space, w_inttype, w_value, + buf.as_str()) else: - base = space.int_w(w_base) + try: + base = space.int_w(w_base) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + base = 37 # this raises the right error in string_to_bigint() if space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w s = unicode_to_decimal_w(space, w_value) else: try: - s = space.str_w(w_value) + s = space.bufferstr_w(w_value) except OperationError as e: raise oefmt(space.w_TypeError, "int() can't convert non-string with explicit " "base") - value, w_longval = _string_to_int_or_long(space, w_value, s, base) + return _string_to_int_or_long(space, w_inttype, w_value, s, base) - if w_longval is not None: - if not space.is_w(w_inttype, space.w_int): - raise oefmt(space.w_OverflowError, - "long int too large to convert to int") - return w_longval - elif space.is_w(w_inttype, space.w_int): - # common case - return wrapint(space, value) - else: - w_obj = space.allocate_instance(W_IntObject, w_inttype) - W_IntObject.__init__(w_obj, value) + +def _from_intlike(space, w_inttype, w_intlike): + w_obj = space.int(w_intlike) + if space.is_w(w_inttype, space.w_int): return w_obj + from pypy.objspace.std.longobject import newbigint + return newbigint(space, w_inttype, space.bigint_w(w_obj)) -W_IntObject.typedef = StdTypeDef("int", - __doc__ = """int(x=0) -> int or long -int(x, base=10) -> int or long +W_AbstractIntObject.typedef = StdTypeDef("int", + __doc__ = """int(x=0) -> integer +int(x, base=10) -> integer Convert a number or string to an integer, or return 0 if no arguments -are given. If x is floating point, the conversion truncates towards zero. -If x is outside the integer range, the function returns a long instead. 
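
The _new_int constructor logic above replaces the old multimethod dispatch for int(x[, base]). A deliberately simplified plain-Python model of the dispatch order (exact int, then __int__, then __trunc__, then string/bytes parsing; subclass allocation and the buffer fallback are ignored here):

def int_like(x, base=None):
    if base is None:
        if type(x) is int:                          # exact ints pass straight through
            return x
        if hasattr(type(x), '__int__'):             # __int__ is preferred ...
            return type(x).__int__(x)
        if hasattr(type(x), '__trunc__'):           # ... then __trunc__
            return int_like(type(x).__trunc__(x))
        if isinstance(x, (str, bytes, bytearray)):
            return int(x, 10)
        raise TypeError("int() argument must be a string or a number")
    if not isinstance(x, (str, bytes, bytearray)):
        raise TypeError("int() can't convert non-string with explicit base")
    return int(x, base)

assert int_like('42') == 42
assert int_like(b'2a', base=16) == 42
assert int_like(3.9) == 3
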
+are given. If x is a number, return x.__int__(). For floating point +numbers, this truncates towards zero. -If x is not a number or if base is given, then x must be a string or -Unicode object representing an integer literal in the given base. The -literal can be preceded by '+' or '-' and be surrounded by whitespace. -The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to -interpret the base from the string as an integer literal. +If x is not a number or if base is given, then x must be a string, +bytes, or bytearray instance representing an integer literal in the +given base. The literal can be preceded by '+' or '-' and be surrounded +by whitespace. The base defaults to 10. Valid bases are 0 and 2-36. +Base 0 means to interpret the base from the string as an integer literal. >>> int('0b100', base=0) 4""", __new__ = interp2app(W_IntObject.descr_new), numerator = typedef.GetSetProperty( - W_IntObject.descr_get_numerator, + W_AbstractIntObject.descr_get_numerator, doc="the numerator of a rational number in lowest terms"), denominator = typedef.GetSetProperty( - W_IntObject.descr_get_denominator, + W_AbstractIntObject.descr_get_denominator, doc="the denominator of a rational number in lowest terms"), real = typedef.GetSetProperty( - W_IntObject.descr_get_real, + W_AbstractIntObject.descr_get_real, doc="the real part of a complex number"), imag = typedef.GetSetProperty( - W_IntObject.descr_get_imag, + W_AbstractIntObject.descr_get_imag, doc="the imaginary part of a complex number"), - __repr__ = interp2app(W_IntObject.descr_repr, - doc=W_AbstractIntObject.descr_repr.__doc__), - __str__ = interp2app(W_IntObject.descr_str, - doc=W_AbstractIntObject.descr_str.__doc__), + from_bytes = interp2app(W_AbstractIntObject.descr_from_bytes, + as_classmethod=True), + to_bytes = interpindirect2app(W_AbstractIntObject.descr_to_bytes), - conjugate = interp2app(W_IntObject.descr_conjugate, - doc=W_AbstractIntObject.descr_conjugate.__doc__), - bit_length = interp2app(W_IntObject.descr_bit_length, - doc=W_AbstractIntObject.descr_bit_length.__doc__), - __format__ = interp2app(W_IntObject.descr_format, - doc=W_AbstractIntObject.descr_format.__doc__), - __hash__ = interp2app(W_IntObject.descr_hash, - doc=W_AbstractIntObject.descr_hash.__doc__), - __coerce__ = interp2app(W_IntObject.descr_coerce, - doc=W_AbstractIntObject.descr_coerce.__doc__), - __oct__ = interp2app(W_IntObject.descr_oct, - doc=W_AbstractIntObject.descr_oct.__doc__), - __hex__ = interp2app(W_IntObject.descr_hex, - doc=W_AbstractIntObject.descr_hex.__doc__), - __getnewargs__ = interp2app( - W_IntObject.descr_getnewargs, - doc=W_AbstractIntObject.descr_getnewargs.__doc__), + __repr__ = interpindirect2app(W_AbstractIntObject.descr_repr), + __str__ = interpindirect2app(W_AbstractIntObject.descr_str), - __int__ = interp2app(W_IntObject.int, - doc=W_AbstractIntObject.int.__doc__), - __long__ = interp2app(W_IntObject.descr_long, - doc=W_AbstractIntObject.descr_long.__doc__), - __index__ = interp2app(W_IntObject.descr_index, - doc=W_AbstractIntObject.descr_index.__doc__), - __trunc__ = interp2app(W_IntObject.descr_trunc, - doc=W_AbstractIntObject.descr_trunc.__doc__), - __float__ = interp2app(W_IntObject.descr_float, - doc=W_AbstractIntObject.descr_float.__doc__), + conjugate = interpindirect2app(W_AbstractIntObject.descr_conjugate), + bit_length = interpindirect2app(W_AbstractIntObject.descr_bit_length), + __format__ = interpindirect2app(W_AbstractIntObject.descr_format), + __hash__ = interpindirect2app(W_AbstractIntObject.descr_hash), + 
__getnewargs__ = interpindirect2app(W_AbstractIntObject.descr_getnewargs), - __pos__ = interp2app(W_IntObject.descr_pos, - doc=W_AbstractIntObject.descr_pos.__doc__), - __neg__ = interp2app(W_IntObject.descr_neg, - doc=W_AbstractIntObject.descr_neg.__doc__), - __abs__ = interp2app(W_IntObject.descr_abs, - doc=W_AbstractIntObject.descr_abs.__doc__), - __nonzero__ = interp2app(W_IntObject.descr_nonzero, - doc=W_AbstractIntObject.descr_nonzero.__doc__), - __invert__ = interp2app(W_IntObject.descr_invert, - doc=W_AbstractIntObject.descr_invert.__doc__), + __int__ = interpindirect2app(W_AbstractIntObject.int), + __index__ = interpindirect2app(W_AbstractIntObject.descr_index), + __trunc__ = interpindirect2app(W_AbstractIntObject.descr_trunc), + __float__ = interpindirect2app(W_AbstractIntObject.descr_float), + __round__ = interpindirect2app(W_AbstractIntObject.descr_round), - __lt__ = interp2app(W_IntObject.descr_lt, - doc=W_AbstractIntObject.descr_lt.__doc__), - __le__ = interp2app(W_IntObject.descr_le, - doc=W_AbstractIntObject.descr_le.__doc__), - __eq__ = interp2app(W_IntObject.descr_eq, - doc=W_AbstractIntObject.descr_eq.__doc__), - __ne__ = interp2app(W_IntObject.descr_ne, - doc=W_AbstractIntObject.descr_ne.__doc__), - __gt__ = interp2app(W_IntObject.descr_gt, - doc=W_AbstractIntObject.descr_gt.__doc__), - __ge__ = interp2app(W_IntObject.descr_ge, - doc=W_AbstractIntObject.descr_ge.__doc__), + __pos__ = interpindirect2app(W_AbstractIntObject.descr_pos), + __neg__ = interpindirect2app(W_AbstractIntObject.descr_neg), + __abs__ = interpindirect2app(W_AbstractIntObject.descr_abs), + __bool__ = interpindirect2app(W_AbstractIntObject.descr_bool), + __invert__ = interpindirect2app(W_AbstractIntObject.descr_invert), - __add__ = interp2app(W_IntObject.descr_add, - doc=W_AbstractIntObject.descr_add.__doc__), - __radd__ = interp2app(W_IntObject.descr_radd, - doc=W_AbstractIntObject.descr_radd.__doc__), - __sub__ = interp2app(W_IntObject.descr_sub, - doc=W_AbstractIntObject.descr_sub.__doc__), - __rsub__ = interp2app(W_IntObject.descr_rsub, - doc=W_AbstractIntObject.descr_rsub.__doc__), - __mul__ = interp2app(W_IntObject.descr_mul, - doc=W_AbstractIntObject.descr_mul.__doc__), - __rmul__ = interp2app(W_IntObject.descr_rmul, - doc=W_AbstractIntObject.descr_rmul.__doc__), + __lt__ = interpindirect2app(W_AbstractIntObject.descr_lt), + __le__ = interpindirect2app(W_AbstractIntObject.descr_le), + __eq__ = interpindirect2app(W_AbstractIntObject.descr_eq), + __ne__ = interpindirect2app(W_AbstractIntObject.descr_ne), + __gt__ = interpindirect2app(W_AbstractIntObject.descr_gt), + __ge__ = interpindirect2app(W_AbstractIntObject.descr_ge), - __and__ = interp2app(W_IntObject.descr_and, - doc=W_AbstractIntObject.descr_and.__doc__), - __rand__ = interp2app(W_IntObject.descr_rand, - doc=W_AbstractIntObject.descr_rand.__doc__), - __or__ = interp2app(W_IntObject.descr_or, - doc=W_AbstractIntObject.descr_or.__doc__), - __ror__ = interp2app(W_IntObject.descr_ror, - doc=W_AbstractIntObject.descr_ror.__doc__), - __xor__ = interp2app(W_IntObject.descr_xor, - doc=W_AbstractIntObject.descr_xor.__doc__), - __rxor__ = interp2app(W_IntObject.descr_rxor, - doc=W_AbstractIntObject.descr_rxor.__doc__), + __add__ = interpindirect2app(W_AbstractIntObject.descr_add), + __radd__ = interpindirect2app(W_AbstractIntObject.descr_radd), + __sub__ = interpindirect2app(W_AbstractIntObject.descr_sub), + __rsub__ = interpindirect2app(W_AbstractIntObject.descr_rsub), + __mul__ = interpindirect2app(W_AbstractIntObject.descr_mul), + __rmul__ = 
interpindirect2app(W_AbstractIntObject.descr_rmul), - __lshift__ = interp2app(W_IntObject.descr_lshift, - doc=W_AbstractIntObject.descr_lshift.__doc__), - __rlshift__ = interp2app(W_IntObject.descr_rlshift, - doc=W_AbstractIntObject.descr_rlshift.__doc__), - __rshift__ = interp2app(W_IntObject.descr_rshift, - doc=W_AbstractIntObject.descr_rshift.__doc__), - __rrshift__ = interp2app(W_IntObject.descr_rrshift, - doc=W_AbstractIntObject.descr_rrshift.__doc__), + __and__ = interpindirect2app(W_AbstractIntObject.descr_and), + __rand__ = interpindirect2app(W_AbstractIntObject.descr_rand), + __or__ = interpindirect2app(W_AbstractIntObject.descr_or), + __ror__ = interpindirect2app(W_AbstractIntObject.descr_ror), + __xor__ = interpindirect2app(W_AbstractIntObject.descr_xor), + __rxor__ = interpindirect2app(W_AbstractIntObject.descr_rxor), - __floordiv__ = interp2app(W_IntObject.descr_floordiv, - doc=W_AbstractIntObject.descr_floordiv.__doc__), - __rfloordiv__ = interp2app( - W_IntObject.descr_rfloordiv, - doc=W_AbstractIntObject.descr_rfloordiv.__doc__), - __div__ = interp2app(W_IntObject.descr_div, - doc=W_AbstractIntObject.descr_div.__doc__), - __rdiv__ = interp2app(W_IntObject.descr_rdiv, - doc=W_AbstractIntObject.descr_rdiv.__doc__), - __truediv__ = interp2app(W_IntObject.descr_truediv, - doc=W_AbstractIntObject.descr_truediv.__doc__), - __rtruediv__ = interp2app(W_IntObject.descr_rtruediv, - doc=W_AbstractIntObject.descr_rtruediv.__doc__), - __mod__ = interp2app(W_IntObject.descr_mod, - doc=W_AbstractIntObject.descr_mod.__doc__), - __rmod__ = interp2app(W_IntObject.descr_rmod, - doc=W_AbstractIntObject.descr_rmod.__doc__), - __divmod__ = interp2app(W_IntObject.descr_divmod, - doc=W_AbstractIntObject.descr_divmod.__doc__), - __rdivmod__ = interp2app(W_IntObject.descr_rdivmod, - doc=W_AbstractIntObject.descr_rdivmod.__doc__), + __lshift__ = interpindirect2app(W_AbstractIntObject.descr_lshift), + __rlshift__ = interpindirect2app(W_AbstractIntObject.descr_rlshift), + __rshift__ = interpindirect2app(W_AbstractIntObject.descr_rshift), + __rrshift__ = interpindirect2app(W_AbstractIntObject.descr_rrshift), - __pow__ = interp2app(W_IntObject.descr_pow, - doc=W_AbstractIntObject.descr_pow.__doc__), - __rpow__ = interp2app(W_IntObject.descr_rpow, - doc=W_AbstractIntObject.descr_rpow.__doc__), + __floordiv__ = interpindirect2app(W_AbstractIntObject.descr_floordiv), + __rfloordiv__ = interpindirect2app(W_AbstractIntObject.descr_rfloordiv), + __div__ = interpindirect2app(W_AbstractIntObject.descr_div), + __rdiv__ = interpindirect2app(W_AbstractIntObject.descr_rdiv), + __truediv__ = interpindirect2app(W_AbstractIntObject.descr_truediv), + __rtruediv__ = interpindirect2app(W_AbstractIntObject.descr_rtruediv), + __mod__ = interpindirect2app(W_AbstractIntObject.descr_mod), + __rmod__ = interpindirect2app(W_AbstractIntObject.descr_rmod), + __divmod__ = interpindirect2app(W_AbstractIntObject.descr_divmod), + __rdivmod__ = interpindirect2app(W_AbstractIntObject.descr_rdivmod), + + __pow__ = interpindirect2app(W_AbstractIntObject.descr_pow), + __rpow__ = interpindirect2app(W_AbstractIntObject.descr_rpow), ) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -1,58 +1,36 @@ -"""The builtin long implementation""" +"""The builtin int type based on rbigint (the old long type)""" import functools from rpython.rlib.objectmodel import specialize -from rpython.rlib.rbigint import rbigint -from rpython.rlib.rstring 
import ParseStringError +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rbigint import SHIFT, _widen_digit, rbigint from rpython.tool.sourcetools import func_renamer, func_with_new_name -from pypy.interpreter import typedef from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import Buffer -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import ( - WrappedDefault, interp2app, interpindirect2app, unwrap_spec) +from pypy.interpreter.error import oefmt +from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from pypy.objspace.std import newformat -from pypy.objspace.std.intobject import W_AbstractIntObject -from pypy.objspace.std.model import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG) -from pypy.objspace.std.stdtypedef import StdTypeDef - - -HASH_BITS = 61 if sys.maxsize > 2 ** 31 - 1 else 31 -HASH_MODULUS = 2 ** HASH_BITS - 1 +from pypy.objspace.std.intobject import ( + HASH_BITS, HASH_MODULUS, W_AbstractIntObject, W_IntObject) +from pypy.objspace.std.model import COMMUTATIVE_OPS def delegate_other(func): @functools.wraps(func) def delegated(self, space, w_other): - if isinstance(w_other, W_AbstractIntObject): - w_other = w_other.descr_long(space) + if isinstance(w_other, W_IntObject): + w_other = w_other.as_w_long(space) elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented return func(self, space, w_other) return delegated -class W_AbstractLongObject(W_Root): +class W_AbstractLongObject(W_AbstractIntObject): __slots__ = () - def is_w(self, space, w_other): - if not isinstance(w_other, W_AbstractLongObject): - return False - if self.user_overridden_class or w_other.user_overridden_class: - return self is w_other - return space.bigint_w(self).eq(space.bigint_w(w_other)) - - def immutable_unique_id(self, space): - if self.user_overridden_class: - return None - b = space.bigint_w(self) - b = b.lshift(3).or_(rbigint.fromint(IDTAG_LONG)) - return space.newlong_from_rbigint(b) - def unwrap(self, space): return self.longval() @@ -66,18 +44,9 @@ return space.newtuple([newlong(space, self.asbigint())]) def descr_conjugate(self, space): - """Returns self, the complex conjugate of any long.""" - return space.long(self) + return self.int(space) def descr_bit_length(self, space): - """long.bit_length() -> int or long - - Number of bits necessary to represent self in binary. 
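
The delegate_other decorator above (adjusted here to promote W_IntObject arguments via as_w_long) replaces the old delegation multimethods: before a long binary operation runs, a machine-int argument is promoted to the bigint representation, and any other type gets NotImplemented so the other operand can try. A minimal sketch of the pattern with stand-in classes:

import functools

def delegate_other(func):
    @functools.wraps(func)
    def delegated(self, other):
        if isinstance(other, int):        # stand-in for the W_IntObject case
            other = Big(other)            # promote to the bigint representation
        elif not isinstance(other, Big):
            return NotImplemented         # let the other operand have a try
        return func(self, other)
    return delegated

class Big(object):                        # stand-in for W_AbstractLongObject
    def __init__(self, value):
        self.value = value

    @delegate_other
    def __add__(self, other):
        return Big(self.value + other.value)

assert (Big(2) + 3).value == 5
assert Big(2).__add__('x') is NotImplemented
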
- >>> bin(37L) - '0b100101' - >>> (37L).bit_length() - 6 - """ bigint = space.bigint_w(self) try: return space.wrap(bigint.bit_length()) @@ -88,126 +57,31 @@ try: f = self.asbigint().truediv(w_other.asbigint()) except ZeroDivisionError: - raise oefmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + raise oefmt(space.w_ZeroDivisionError, "division by zero") except OverflowError: raise oefmt(space.w_OverflowError, - "long/long too large for a float") + "integer division result too large for a float") return space.newfloat(f) @delegate_other def descr_truediv(self, space, w_other): - """x.__truediv__(y) <==> x/y""" return W_AbstractLongObject._truediv(self, space, w_other) @delegate_other def descr_rtruediv(self, space, w_other): - """x.__rtruediv__(y) <==> y/x""" return W_AbstractLongObject._truediv(w_other, space, self) - @delegate_other - def descr_coerce(self, space, w_other): - """x.__coerce__(y) <==> coerce(x, y)""" - return space.newtuple([self, w_other]) - - def descr_get_numerator(self, space): - return space.long(self) - descr_get_real = func_with_new_name(descr_get_numerator, 'descr_get_real') - def descr_format(self, space, w_format_spec): return newformat.run_formatter(space, w_format_spec, "format_int_or_long", self, newformat.LONG_KIND) - def descr_get_denominator(self, space): - return space.newlong(1) + def descr_hash(self, space): + return space.wrap(_hash_long(space, self.asbigint())) - def descr_get_imag(self, space): - return space.newlong(0) - - def _make_descr_unaryop(opname): - op = getattr(rbigint, opname) - @func_renamer('descr_' + opname) - def descr_unaryop(self, space): - return space.wrap(op(self.asbigint())) - descr_unaryop.__doc__ = 'x.__%s__(y) <==> %s(x, y)' % (opname, opname) - return descr_unaryop - - descr_repr = _make_descr_unaryop('repr') - descr_str = _make_descr_unaryop('str') - descr_hash = _make_descr_unaryop('hash') - descr_oct = _make_descr_unaryop('oct') - descr_hex = _make_descr_unaryop('hex') - - def descr_pow(self, space, w_exponent, w_modulus=None): - """x.__pow__(y[, z]) <==> pow(x, y[, z])""" - raise NotImplementedError - descr_rpow = func_with_new_name(descr_pow, 'descr_rpow') - descr_rpow.__doc__ = "y.__rpow__(x[, z]) <==> pow(x, y[, z])" - - def _abstract_unaryop(opname, doc=None): - @func_renamer('descr_' + opname) - def descr_unaryop(self, space): - raise NotImplementedError - descr_unaryop.__doc__ = doc - return descr_unaryop - - descr_long = _abstract_unaryop('long', "x.__long__() <==> long(x)") - descr_float = _abstract_unaryop('float', "x.__float__() <==> float(x)") - descr_index = _abstract_unaryop( - 'index', "x[y:z] <==> x[y.__index__():z.__index__()]") - descr_trunc = _abstract_unaryop('trunc', - "Truncating an Integral returns itself.") - descr_pos = _abstract_unaryop('pos', "x.__pos__() <==> +x") - descr_neg = _abstract_unaryop('neg', "x.__neg__() <==> -x") - descr_abs = _abstract_unaryop('abs', "x.__abs__() <==> abs(x)") - descr_nonzero = _abstract_unaryop('nonzero', "x.__nonzero__() <==> x != 0") - descr_invert = _abstract_unaryop('invert', "x.__invert__() <==> ~x") - - def _abstract_cmpop(opname): - @func_renamer('descr_' + opname) - def descr_cmp(self, space, w_other): - raise NotImplementedError - descr_cmp.__doc__ = 'x.__%s__(y) <==> x%sy' % (opname, CMP_OPS[opname]) - return descr_cmp - - descr_lt = _abstract_cmpop('lt') - descr_le = _abstract_cmpop('le') - descr_eq = _abstract_cmpop('eq') - descr_ne = _abstract_cmpop('ne') - descr_gt = _abstract_cmpop('gt') - descr_ge = _abstract_cmpop('ge') - - def 
_abstract_binop(opname): - oper = BINARY_OPS.get(opname) - if oper == '%': - oper = '%%' - oper = '%s(%%s, %%s)' % opname if not oper else '%%s%s%%s' % oper - @func_renamer('descr_' + opname) - def descr_binop(self, space, w_other): - raise NotImplementedError - descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname, - oper % ('x', 'y')) - descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) - descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname, - oper % ('y', 'x')) - return descr_binop, descr_rbinop - - descr_add, descr_radd = _abstract_binop('add') - descr_sub, descr_rsub = _abstract_binop('sub') - descr_mul, descr_rmul = _abstract_binop('mul') - - descr_and, descr_rand = _abstract_binop('and') - descr_or, descr_ror = _abstract_binop('or') - descr_xor, descr_rxor = _abstract_binop('xor') - - descr_lshift, descr_rlshift = _abstract_binop('lshift') - descr_rshift, descr_rrshift = _abstract_binop('rshift') - - descr_floordiv, descr_rfloordiv = _abstract_binop('floordiv') - descr_div, descr_rdiv = _abstract_binop('div') - descr_mod, descr_rmod = _abstract_binop('mod') - descr_divmod, descr_rdivmod = _abstract_binop('divmod') + def descr_str(self, space): + return space.wrap(self.asbigint().str()) + descr_repr = descr_str class W_LongObject(W_AbstractLongObject): @@ -230,7 +104,7 @@ return self.num.tofloat() except OverflowError: raise oefmt(space.w_OverflowError, - "long int too large to convert to float") + "int too large to convert to float") def toint(self): return self.num.toint() @@ -253,7 +127,7 @@ return self.num.toint() except OverflowError: raise oefmt(space.w_OverflowError, - "long int too large to convert to int") + "int too large to convert to int") def uint_w(self, space): try: @@ -263,7 +137,7 @@ "cannot convert negative integer to unsigned int") except OverflowError: raise oefmt(space.w_OverflowError, - "long int too large to convert to unsigned int") + "int too large to convert to unsigned int") def bigint_w(self, space): return self.num @@ -272,13 +146,11 @@ return self.tofloat(space) def int(self, space): - if (type(self) is not W_LongObject and - space.is_overloaded(self, space.w_long, '__int__')): - return W_Root.int(self, space) - try: - return space.newint(self.num.toint()) - except OverflowError: - return self.descr_long(space) + if type(self) is W_LongObject: + return self + if not space.is_overloaded(self, space.w_int, '__int__'): + return W_LongObject(self.num) + return W_Root.int(self, space) def asbigint(self): return self.num @@ -286,24 +158,18 @@ def __repr__(self): return '' % self.num.tolong() - def descr_long(self, space): - # __long__ is supposed to do nothing, unless it has a derived - # long object, where it should return an exact one. 
- if space.is_w(space.type(self), space.w_long): - return self - return W_LongObject(self.num) - descr_index = descr_trunc = descr_pos = descr_long + descr_index = descr_trunc = descr_pos = int def descr_float(self, space): return space.newfloat(self.tofloat(space)) - def descr_nonzero(self, space): + def descr_bool(self, space): return space.newbool(self.num.tobool()) @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_modulus=None): - if isinstance(w_exponent, W_AbstractIntObject): - w_exponent = w_exponent.descr_long(space) + if isinstance(w_exponent, W_IntObject): + w_exponent = w_exponent.as_w_long(space) elif not isinstance(w_exponent, W_AbstractLongObject): return space.w_NotImplemented @@ -313,8 +179,8 @@ w_exponent = w_exponent.descr_float(space) return space.pow(self, w_exponent, space.w_None) return W_LongObject(self.num.pow(w_exponent.asbigint())) - elif isinstance(w_modulus, W_AbstractIntObject): - w_modulus = w_modulus.descr_long(space) + elif isinstance(w_modulus, W_IntObject): + w_modulus = w_modulus.as_w_long(space) elif not isinstance(w_modulus, W_AbstractLongObject): return space.w_NotImplemented @@ -330,8 +196,8 @@ @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_rpow(self, space, w_base, w_modulus=None): - if isinstance(w_base, W_AbstractIntObject): - w_base = w_base.descr_long(space) + if isinstance(w_base, W_IntObject): + w_base = w_base.as_w_long(space) elif not isinstance(w_base, W_AbstractLongObject): return space.w_NotImplemented return w_base.descr_pow(space, self, w_modulus) @@ -433,7 +299,7 @@ z = self.num.floordiv(w_other.asbigint()) except ZeroDivisionError: raise oefmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + "integer division or modulo by zero") return newlong(space, z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) @@ -445,7 +311,7 @@ z = self.num.mod(w_other.asbigint()) except ZeroDivisionError: raise oefmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + "integer division or modulo by zero") return newlong(space, z) descr_mod, descr_rmod = _make_descr_binop(_mod) @@ -454,11 +320,30 @@ div, mod = self.num.divmod(w_other.asbigint()) except ZeroDivisionError: raise oefmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + "integer division or modulo by zero") return space.newtuple([newlong(space, div), newlong(space, mod)]) descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) +def _hash_long(space, v): + i = v.numdigits() - 1 + if i == -1: + return 0 + + # compute v % HASH_MODULUS + x = _widen_digit(0) + while i >= 0: + x = (x << SHIFT) + v.widedigit(i) + # efficient x % HASH_MODULUS: as HASH_MODULUS is a Mersenne + # prime + x = (x & HASH_MODULUS) + (x >> HASH_BITS) + while x >= HASH_MODULUS: + x -= HASH_MODULUS + i -= 1 + x = intmask(intmask(x) * v.sign) + return -2 if x == -1 else x + + def newlong(space, bigint): """Turn the bigint into a W_LongObject. 
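The fold in _hash_long above relies on HASH_MODULUS being the Mersenne prime 2**HASH_BITS - 1 (61 bits on 64-bit builds, 31 otherwise, per the constants now imported from intobject): since 2**HASH_BITS equals 1 modulo that prime, the part shifted out above bit HASH_BITS can simply be added back in, instead of performing a real division per digit. A minimal plain-Python sketch of just this reduction step (hypothetical helper name, nonnegative input only, without the sign multiplication and the -1 to -2 special case handled above) is:

    def mersenne_mod(x, k):
        # x % (2**k - 1) without a division: 2**k == 1 (mod 2**k - 1), so
        # the bits above position k can be folded back in by addition.
        modulus = (1 << k) - 1
        while x >> k:                        # while x does not fit in k bits
            x = (x & modulus) + (x >> k)
        return 0 if x == modulus else x

    assert mersenne_mod(10 ** 30, 61) == (10 ** 30) % (2 ** 61 - 1)

The fold-and-subtract lines in _hash_long apply the same identity once per bigint digit.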
If withsmalllong is enabled, check if the bigint would fit in a smalllong, and return a @@ -483,85 +368,10 @@ try: return W_LongObject.fromfloat(space, floatval) except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("cannot convert float infinity to integer")) + raise oefmt(space.w_OverflowError, + "cannot convert float infinity to integer") except ValueError: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert float NaN to integer")) - - - at unwrap_spec(w_x=WrappedDefault(0)) -def descr__new__(space, w_longtype, w_x, w_base=None): - if space.config.objspace.std.withsmalllong: - from pypy.objspace.std.smalllongobject import W_SmallLongObject - else: - W_SmallLongObject = None - - w_value = w_x # 'x' is the keyword argument name in CPython - if w_base is None: - # check for easy cases - if (W_SmallLongObject and type(w_value) is W_SmallLongObject - and space.is_w(w_longtype, space.w_long)): - return w_value - elif type(w_value) is W_LongObject: - return newbigint(space, w_longtype, w_value.num) - elif (space.lookup(w_value, '__long__') is not None or - space.lookup(w_value, '__int__') is not None): - w_obj = space.long(w_value) - return newbigint(space, w_longtype, space.bigint_w(w_obj)) - elif space.lookup(w_value, '__trunc__') is not None: - w_obj = space.trunc(w_value) - # :-( blame CPython 2.7 - if space.lookup(w_obj, '__long__') is not None: - w_obj = space.long(w_obj) - else: - w_obj = space.int(w_obj) - return newbigint(space, w_longtype, space.bigint_w(w_obj)) - elif space.isinstance_w(w_value, space.w_str): - return _string_to_w_long(space, w_longtype, w_value, - space.str_w(w_value)) - elif space.isinstance_w(w_value, space.w_unicode): - from pypy.objspace.std.unicodeobject import unicode_to_decimal_w - return _string_to_w_long(space, w_longtype, w_value, - unicode_to_decimal_w(space, w_value)) - else: - try: - w_buffer = space.buffer(w_value) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise oefmt(space.w_TypeError, - "long() argument must be a string or a number, " - "not '%T'", w_value) - else: - buf = space.interp_w(Buffer, w_buffer) - return _string_to_w_long(space, w_longtype, w_value, - buf.as_str()) - else: - base = space.int_w(w_base) - - if space.isinstance_w(w_value, space.w_unicode): - from pypy.objspace.std.unicodeobject import unicode_to_decimal_w - s = unicode_to_decimal_w(space, w_value) - else: - try: - s = space.str_w(w_value) - except OperationError: - raise oefmt(space.w_TypeError, - "long() can't convert non-string with explicit " - "base") - return _string_to_w_long(space, w_longtype, w_value, s, base) - - -def _string_to_w_long(space, w_longtype, w_source, string, base=10): - try: - bigint = rbigint.fromstr2(string, base) - except ParseStringError as e: - from pypy.objspace.std.intobject import wrap_parsestringerror - raise wrap_parsestringerror(space, e, w_source) - return newbigint(space, w_longtype, bigint) -_string_to_w_long._dont_inline_ = True + raise oefmt(space.w_ValueError, "cannot convert float NaN to integer") def newbigint(space, w_longtype, bigint): @@ -571,7 +381,7 @@ longobject.py, but takes an explicit w_longtype argument. 
""" if (space.config.objspace.std.withsmalllong - and space.is_w(w_longtype, space.w_long)): + and space.is_w(w_longtype, space.w_int)): try: z = bigint.tolonglong() except OverflowError: @@ -582,98 +392,3 @@ w_obj = space.allocate_instance(W_LongObject, w_longtype) W_LongObject.__init__(w_obj, bigint) return w_obj - - -W_AbstractLongObject.typedef = StdTypeDef("long", - __doc__ = """long(x=0) -> long -long(x, base=10) -> long - -Convert a number or string to a long integer, or return 0L if no arguments -are given. If x is floating point, the conversion truncates towards zero. - -If x is not a number or if base is given, then x must be a string or -Unicode object representing an integer literal in the given base. The -literal can be preceded by '+' or '-' and be surrounded by whitespace. -The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to -interpret the base from the string as an integer literal. ->>> int('0b100', base=0) -4L""", - __new__ = interp2app(descr__new__), - - numerator = typedef.GetSetProperty( - W_AbstractLongObject.descr_get_numerator, - doc="the numerator of a rational number in lowest terms"), - denominator = typedef.GetSetProperty( - W_AbstractLongObject.descr_get_denominator, - doc="the denominator of a rational number in lowest terms"), - real = typedef.GetSetProperty( - W_AbstractLongObject.descr_get_real, - doc="the real part of a complex number"), - imag = typedef.GetSetProperty( - W_AbstractLongObject.descr_get_imag, - doc="the imaginary part of a complex number"), - - __repr__ = interp2app(W_AbstractLongObject.descr_repr), - __str__ = interp2app(W_AbstractLongObject.descr_str), - - conjugate = interpindirect2app(W_AbstractLongObject.descr_conjugate), - bit_length = interpindirect2app(W_AbstractLongObject.descr_bit_length), - __format__ = interpindirect2app(W_AbstractLongObject.descr_format), - __hash__ = interpindirect2app(W_AbstractLongObject.descr_hash), - __coerce__ = interpindirect2app(W_AbstractLongObject.descr_coerce), - __oct__ = interpindirect2app(W_AbstractLongObject.descr_oct), - __hex__ = interpindirect2app(W_AbstractLongObject.descr_hex), - __getnewargs__ = interpindirect2app(W_AbstractLongObject.descr_getnewargs), - - __int__ = interpindirect2app(W_AbstractLongObject.int), - __long__ = interpindirect2app(W_AbstractLongObject.descr_long), - __index__ = interpindirect2app(W_AbstractLongObject.descr_index), - __trunc__ = interpindirect2app(W_AbstractLongObject.descr_trunc), - __float__ = interpindirect2app(W_AbstractLongObject.descr_float), - - __pos__ = interpindirect2app(W_AbstractLongObject.descr_pos), - __neg__ = interpindirect2app(W_AbstractLongObject.descr_neg), - __abs__ = interpindirect2app(W_AbstractLongObject.descr_abs), - __nonzero__ = interpindirect2app(W_AbstractLongObject.descr_nonzero), - __invert__ = interpindirect2app(W_AbstractLongObject.descr_invert), - - __lt__ = interpindirect2app(W_AbstractLongObject.descr_lt), - __le__ = interpindirect2app(W_AbstractLongObject.descr_le), - __eq__ = interpindirect2app(W_AbstractLongObject.descr_eq), - __ne__ = interpindirect2app(W_AbstractLongObject.descr_ne), - __gt__ = interpindirect2app(W_AbstractLongObject.descr_gt), - __ge__ = interpindirect2app(W_AbstractLongObject.descr_ge), - - __add__ = interpindirect2app(W_AbstractLongObject.descr_add), - __radd__ = interpindirect2app(W_AbstractLongObject.descr_radd), - __sub__ = interpindirect2app(W_AbstractLongObject.descr_sub), - __rsub__ = interpindirect2app(W_AbstractLongObject.descr_rsub), - __mul__ = 
interpindirect2app(W_AbstractLongObject.descr_mul), - __rmul__ = interpindirect2app(W_AbstractLongObject.descr_rmul), - - __and__ = interpindirect2app(W_AbstractLongObject.descr_and), - __rand__ = interpindirect2app(W_AbstractLongObject.descr_rand), - __or__ = interpindirect2app(W_AbstractLongObject.descr_or), - __ror__ = interpindirect2app(W_AbstractLongObject.descr_ror), - __xor__ = interpindirect2app(W_AbstractLongObject.descr_xor), - __rxor__ = interpindirect2app(W_AbstractLongObject.descr_rxor), - - __lshift__ = interpindirect2app(W_AbstractLongObject.descr_lshift), - __rlshift__ = interpindirect2app(W_AbstractLongObject.descr_rlshift), - __rshift__ = interpindirect2app(W_AbstractLongObject.descr_rshift), - __rrshift__ = interpindirect2app(W_AbstractLongObject.descr_rrshift), - - __floordiv__ = interpindirect2app(W_AbstractLongObject.descr_floordiv), - __rfloordiv__ = interpindirect2app(W_AbstractLongObject.descr_rfloordiv), - __div__ = interpindirect2app(W_AbstractLongObject.descr_div), - __rdiv__ = interpindirect2app(W_AbstractLongObject.descr_rdiv), - __truediv__ = interpindirect2app(W_AbstractLongObject.descr_truediv), - __rtruediv__ = interpindirect2app(W_AbstractLongObject.descr_rtruediv), - __mod__ = interpindirect2app(W_AbstractLongObject.descr_mod), - __rmod__ = interpindirect2app(W_AbstractLongObject.descr_rmod), - __divmod__ = interpindirect2app(W_AbstractLongObject.descr_divmod), - __rdivmod__ = interpindirect2app(W_AbstractLongObject.descr_rdivmod), - - __pow__ = interpindirect2app(W_AbstractLongObject.descr_pow), - __rpow__ = interpindirect2app(W_AbstractLongObject.descr_rpow), -) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -20,7 +20,6 @@ } IDTAG_INT = 1 -#IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 @@ -87,7 +86,7 @@ # XXX: Bool/Int/Long are pythontypes but still included here # for delegation to Float/Complex boolobject.W_BoolObject: [], - #intobject.W_IntObject: [], + intobject.W_IntObject: [], floatobject.W_FloatObject: [], typeobject.W_TypeObject: [], sliceobject.W_SliceObject: [], diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -1,5 +1,5 @@ -""" -Implementation of 'small' longs, stored as a r_longlong. +"""Implementation of the int type based on r_longlong. + Useful for 32-bit applications manipulating values a bit larger than fits in an 'int'. """ @@ -11,7 +11,7 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import WrappedDefault, unwrap_spec -from pypy.objspace.std.intobject import W_AbstractIntObject +from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject from pypy.objspace.std.model import COMMUTATIVE_OPS @@ -70,16 +70,13 @@ return float(self.longlong) def int(self, space): - # XXX: this shouldn't need an ovfcheck? 
- a = self.longlong - b = intmask(a) - return space.newint(b) if b == a else self + if type(self) is W_SmallLongObject: + return self + if not space.is_overloaded(self, space.w_int, '__int__'): + return W_LongObject(self.num) + return W_Root.int(self, space) - def descr_long(self, space): - if space.is_w(space.type(self), space.w_long): - return self - return W_SmallLongObject(self.longlong) - descr_index = descr_trunc = descr_pos = descr_long + descr_index = descr_trunc = descr_pos = int def descr_float(self, space): return space.newfloat(float(self.longlong)) @@ -98,7 +95,7 @@ def descr_abs(self, space): return self if self.longlong >= 0 else self.descr_neg(space) - def descr_nonzero(self, space): + def descr_bool(self, space): return space.newbool(bool(self.longlong)) def descr_invert(self, space): @@ -110,7 +107,7 @@ if isinstance(w_exponent, W_AbstractLongObject): self = _small2long(space, self) return self.descr_pow(space, w_exponent, w_modulus) - elif not isinstance(w_exponent, W_AbstractIntObject): + elif not isinstance(w_exponent, W_IntObject): return space.w_NotImplemented x = self.longlong @@ -125,7 +122,7 @@ except OverflowError: self = _small2long(space, self) return self.descr_pow(space, w_exponent, w_modulus) - elif isinstance(w_modulus, W_AbstractIntObject): + elif isinstance(w_modulus, W_IntObject): w_modulus = _int2small(space, w_modulus) elif not isinstance(w_modulus, W_AbstractLongObject): return space.w_NotImplemented @@ -147,7 +144,7 @@ @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_rpow(self, space, w_base, w_modulus=None): - if isinstance(w_base, W_AbstractIntObject): + if isinstance(w_base, W_IntObject): # Defer to w_base.descr_pow w_base = _int2small(space, w_base) elif not isinstance(w_base, W_AbstractLongObject): @@ -159,7 +156,7 @@ bigint_op = getattr(rbigint, opname) @func_renamer('descr_' + opname) def descr_cmp(self, space, w_other): - if isinstance(w_other, W_AbstractIntObject): + if isinstance(w_other, W_IntObject): result = op(self.longlong, w_other.int_w(space)) elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented @@ -184,7 +181,7 @@ @func_renamer(descr_name) def descr_binop(self, space, w_other): - if isinstance(w_other, W_AbstractIntObject): + if isinstance(w_other, W_IntObject): w_other = _int2small(space, w_other) elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented @@ -211,7 +208,7 @@ long_rop = getattr(W_LongObject, descr_rname) @func_renamer(descr_rname) def descr_rbinop(self, space, w_other): - if isinstance(w_other, W_AbstractIntObject): + if isinstance(w_other, W_IntObject): w_other = _int2small(space, w_other) elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented @@ -386,7 +383,7 @@ def _int2small(space, w_int): - # XXX: W_IntObject.descr_long should probably return W_SmallLongs + # XXX: W_IntObject.as_w_long should probably return W_SmallLongs return W_SmallLongObject.fromint(w_int.int_w(space)) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -126,25 +126,13 @@ Cls_oo = make_specialised_class((object, object)) Cls_ff = make_specialised_class((float, float)) -def is_int_w(space, w_obj): - """Determine if obj can be safely casted to an int_w""" - try: - w_obj.int_w(space) - except OperationError, e: - if not (e.match(space, space.w_OverflowError) or - e.match(space, space.w_TypeError)): - raise - 
return False - return True - def makespecialisedtuple(space, list_w): - # XXX: hardcoded to W_LongObject until py3k W_IntObject is restored - from pypy.objspace.std.longobject import W_LongObject + from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.floatobject import W_FloatObject if len(list_w) == 2: w_arg1, w_arg2 = list_w - if type(w_arg1) is W_LongObject and is_int_w(space, w_arg1): - if type(w_arg2) is W_LongObject and is_int_w(space, w_arg2): + if type(w_arg1) is W_IntObject: + if type(w_arg2) is W_IntObject: return Cls_ii(space, w_arg1, w_arg2) elif type(w_arg1) is W_FloatObject: if type(w_arg2) is W_FloatObject: diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -207,9 +207,7 @@ z = 9007199254740992+0j i = 9007199254740993 assert not complex.__eq__(z, i) - assert not complex.__eq__(z, long(i)) assert complex.__ne__(z, i) - assert complex.__ne__(z, long(i)) def test_mod(self): a = 3.33+4.43j diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -1,5 +1,4 @@ # encoding: utf-8 -import py import sys from pypy.objspace.std import intobject as iobj from pypy.objspace.std.multimethod import FailedToImplement @@ -8,9 +7,6 @@ class TestW_IntObject: - def setup_class(cls): - py.test.skip("W_IntObject was replaced w/ W_LongObject in py3k") - def _longshiftresult(self, x): """ calculate an overflowing shift """ n = 1 @@ -80,7 +76,7 @@ f1 = iobj.W_IntObject(x) f2 = iobj.W_IntObject(y) v = f1.descr_add(space, f2) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(x + y)) def test_sub(self): @@ -96,7 +92,7 @@ f1 = iobj.W_IntObject(x) f2 = iobj.W_IntObject(y) v = f1.descr_sub(space, f2) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(sys.maxint - -1)) def test_mul(self): @@ -112,7 +108,7 @@ f1 = iobj.W_IntObject(x) f2 = iobj.W_IntObject(y) v = f1.descr_mul(space, f2) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(x * y)) def test_div(self): @@ -128,7 +124,7 @@ f1 = iobj.W_IntObject(x) f2 = iobj.W_IntObject(y) v = f1.descr_div(space, f2) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(x / y)) def test_mod(self): @@ -155,7 +151,7 @@ f2 = iobj.W_IntObject(y) v = f1.descr_divmod(space, f2) w_q, w_r = space.fixedview(v, 2) - assert space.isinstance_w(w_q, space.w_long) + assert space.isinstance_w(w_q, space.w_int) expected = divmod(x, y) assert space.bigint_w(w_q).eq(rbigint.fromlong(expected[0])) # no overflow possible @@ -187,7 +183,7 @@ assert v.intval == x ** y f1, f2 = [iobj.W_IntObject(i) for i in (10, 20)] v = f1.descr_pow(space, f2, space.w_None) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(pow(10, 20))) def test_neg(self): @@ -199,7 +195,7 @@ x = -sys.maxint-1 f1 = iobj.W_IntObject(x) v = f1.descr_neg(space) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(-x)) def 
test_pos(self): @@ -225,7 +221,7 @@ x = -sys.maxint-1 f1 = iobj.W_IntObject(x) v = f1.descr_abs(space) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(abs(x))) def test_invert(self): @@ -246,7 +242,7 @@ f1 = iobj.W_IntObject(x) f2 = iobj.W_IntObject(y) v = f1.descr_lshift(space, f2) - assert space.isinstance_w(v, space.w_long) + assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(x << y)) def test_rshift(self): @@ -524,10 +520,6 @@ else: assert False, value - def test_coerce(self): - assert 3 .__coerce__(4) == (3, 4) - assert 3 .__coerce__(4L) == NotImplemented - class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} @@ -535,7 +527,7 @@ def test_inplace(self): # ensure other inplace ops still work l = [] - l += xrange(5) + l += range(5) assert l == list(range(5)) a = 8.5 a -= .5 diff --git a/pypy/objspace/std/test/test_stdobjspace.py b/pypy/objspace/std/test/test_stdobjspace.py --- a/pypy/objspace/std/test/test_stdobjspace.py +++ b/pypy/objspace/std/test/test_stdobjspace.py @@ -38,13 +38,13 @@ def test_fastpath_isinstance(self): from pypy.objspace.std.bytesobject import W_BytesObject - from pypy.objspace.std.longobject import W_LongObject + from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.iterobject import W_AbstractSeqIterObject from pypy.objspace.std.iterobject import W_SeqIterObject space = self.space assert space._get_interplevel_cls(space.w_str) is W_BytesObject - assert space._get_interplevel_cls(space.w_int) is W_LongObject + assert space._get_interplevel_cls(space.w_int) is W_AbstractIntObject class X(W_BytesObject): def __init__(self): pass From noreply at buildbot.pypy.org Tue Mar 4 05:29:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Mar 2014 05:29:35 +0100 (CET) Subject: [pypy-commit] pypy py3k: we seem to require a new bytecode magic Message-ID: <20140304042935.206391C3427@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69655:cf7c5390681e Date: 2014-03-03 20:28 -0800 http://bitbucket.org/pypy/pypy/changeset/cf7c5390681e/ Log: we seem to require a new bytecode magic diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -35,7 +35,7 @@ # different value for the highest 16 bits. Bump pypy_incremental_magic every # time you make pyc files incompatible -pypy_incremental_magic = 32 # bump it by 16 +pypy_incremental_magic = 48 # bump it by 16 assert pypy_incremental_magic % 16 == 0 assert pypy_incremental_magic < 3000 # the magic number of Python 3. 
There are # no known magic numbers below this value From noreply at buildbot.pypy.org Tue Mar 4 05:29:36 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Mar 2014 05:29:36 +0100 (CET) Subject: [pypy-commit] pypy py3k: ensure we are testing the 'long' paths Message-ID: <20140304042936.544041C3427@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69656:4a6f33f5edb7 Date: 2014-03-03 20:28 -0800 http://bitbucket.org/pypy/pypy/changeset/4a6f33f5edb7/ Log: ensure we are testing the 'long' paths diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -42,46 +42,54 @@ class AppTestLong: + + def setup_class(cls): + from pypy.interpreter import gateway + from pypy.objspace.std.longobject import W_LongObject + def w__long(space, w_obj): + return W_LongObject.fromint(space, space.int_w(w_obj)) + cls.w__long = cls.space.wrap(gateway.interp2app(w__long)) + def test_trunc(self): import math - assert math.trunc(1) == 1 - assert math.trunc(-1) == -1 + assert math.trunc(self._long(1)) == self._long(1) + assert math.trunc(-self._long(1)) == -self._long(1) def test_add(self): - x = 123 - assert int(x + 12443) == 123 + 12443 + x = self._long(123) + assert int(x + self._long(12443)) == 123 + 12443 x = -20 - assert x + 2 + 3 + True == -14 + assert x + 2 + self._long(3) + True == -self._long(14) def test_sub(self): - x = 58543 - assert int(x - 12332) == 58543 - 12332 - x = 237123838281233 - assert x * 12 == x * 12 + x = self._long(58543) + assert int(x - self._long(12332)) == 58543 - 12332 + x = self._long(237123838281233) + assert x * 12 == x * self._long(12) def test_mul(self): - x = 363 + x = self._long(363) assert x * 2 ** 40 == x << 40 def test_truediv(self): - a = 31415926 / 10000000 + a = self._long(31415926) / self._long(10000000) assert a == 3.1415926 def test_floordiv(self): - x = 31415926 - a = x // 10000000 - assert a == 3 + x = self._long(31415926) + a = x // self._long(10000000) + assert a == self._long(3) def test_numerator_denominator(self): - assert (1).numerator == 1 - assert (1).denominator == 1 - assert (42).numerator == 42 - assert (42).denominator == 1 + assert (self._long(1)).numerator == self._long(1) + assert (self._long(1)).denominator == self._long(1) + assert (self._long(42)).numerator == self._long(42) + assert (self._long(42)).denominator == self._long(1) def test_compare(self): Z = 0 - ZL = 0 - for BIG in (1, 1 << 62, 1 << 9999): + ZL = self._long(0) + for BIG in (self._long(1), self._long(1) << 62, self._long(1) << 9999): assert Z == ZL assert not (Z != ZL) assert ZL == Z @@ -158,7 +166,7 @@ def test_conversion(self): class long2(int): pass - x = 1 + x = self._long(1) x = long2(x<<100) y = int(x) assert type(y) == int @@ -174,13 +182,13 @@ assert type(long2(5) // 1) is int def test_pow(self): - x = 0 - assert pow(x, 0, 1) == 0 - assert pow(-1, -1) == -1.0 + x = self._long(0) + assert pow(x, self._long(0), self._long(1)) == self._long(0) + assert pow(-self._long(1), -self._long(1)) == -1.0 def test_getnewargs(self): - assert 0 .__getnewargs__() == (0,) - assert (-1) .__getnewargs__() == (-1,) + assert self._long(0) .__getnewargs__() == (self._long(0),) + assert (-self._long(1)) .__getnewargs__() == (-self._long(1),) def test_divmod(self): def check_division(x, y): @@ -194,8 +202,8 @@ assert 0 <= r < y else: assert y < r <= 0 - for x in [-1, 0, 1, 2 ** 100 - 1, -2 ** 100 - 1]: - for y in [-105566530, -1, 
1, 1034522340]: + for x in [-self._long(1), self._long(0), self._long(1), self._long(2) ** 100 - 1, -self._long(2) ** 100 - 1]: + for y in [-self._long(105566530), -self._long(1), self._long(1), self._long(1034522340)]: print("checking division for %s, %s" % (x, y)) check_division(x, y) # special case from python tests: @@ -206,31 +214,33 @@ y = 10953035502453784575 y >>= s2*16 x = 0x3FE0003FFFFC0001FFF - y = 0x9800FFC1 + y = self._long(0x9800FFC1) check_division(x, y) - raises(ZeroDivisionError, "x // 0") + raises(ZeroDivisionError, "x // self._long(0)") + divmod(3, self._long(4)) def test_format(self): assert repr(12345678901234567890) == '12345678901234567890' assert str(12345678901234567890) == '12345678901234567890' - assert hex(0x1234567890ABCDEF) == '0x1234567890abcdef' - assert oct(0o1234567012345670) == '0o1234567012345670' + assert hex(self._long(0x1234567890ABCDEF)) == '0x1234567890abcdef' + assert oct(self._long(0o1234567012345670)) == '0o1234567012345670' def test_bits(self): - x = 0xAAAAAAAA - assert x | 0x55555555 == 0xFFFFFFFF - assert x & 0x55555555 == 0x00000000 - assert x ^ 0x55555555 == 0xFFFFFFFF - assert -x | 0x55555555 == -0xAAAAAAA9 - assert x | 0x555555555 == 0x5FFFFFFFF - assert x & 0x555555555 == 0x000000000 - assert x ^ 0x555555555 == 0x5FFFFFFFF + x = self._long(0xAAAAAAAA) + assert x | self._long(0x55555555) == self._long(0xFFFFFFFF) + assert x & self._long(0x55555555) == self._long(0x00000000) + assert x ^ self._long(0x55555555) == self._long(0xFFFFFFFF) + assert -x | self._long(0x55555555) == -self._long(0xAAAAAAA9) + assert x | self._long(0x555555555) == self._long(0x5FFFFFFFF) + assert x & self._long(0x555555555) == self._long(0x000000000) + assert x ^ self._long(0x555555555) == self._long(0x5FFFFFFFF) def test_hash(self): import sys modulus = sys.hash_info.modulus - for x in (list(range(200)) + - [1234567890123456789, 18446743523953737727, + for x in ([self._long(i) for i in range(200)] + + [self._long(1234567890123456789), + 1234567890123456789, 18446743523953737727, 987685321987685321987685321987685321987685321]): y = x % modulus assert hash(x) == hash(y) @@ -247,10 +257,10 @@ def test_math_log(self): import math - raises(ValueError, math.log, 0) - raises(ValueError, math.log, -1) - raises(ValueError, math.log, -2) - raises(ValueError, math.log, -(1 << 10000)) + raises(ValueError, math.log, self._long(0)) + raises(ValueError, math.log, -self._long(1)) + raises(ValueError, math.log, -self._long(2)) + raises(ValueError, math.log, -(self._long(1) << 10000)) #raises(ValueError, math.log, 0) raises(ValueError, math.log, -1) raises(ValueError, math.log, -2) @@ -261,15 +271,15 @@ assert int(n) == n assert str(int(n)) == str(n) a = memoryview(b'123') - assert int(a) == 123 + assert int(a) == self._long(123) def test_huge_longs(self): import operator - x = 1 - huge = x << 40000 + x = self._long(1) + huge = x << self._long(40000) raises(OverflowError, float, huge) raises(OverflowError, operator.truediv, huge, 3) - raises(OverflowError, operator.truediv, huge, 3) + raises(OverflowError, operator.truediv, huge, self._long(3)) def test_just_trunc(self): class myint(object): @@ -307,8 +317,8 @@ assert int(A('abc')) == 42 def test_conjugate(self): - assert (7).conjugate() == 7 - assert (-7).conjugate() == -7 + assert (self._long(7)).conjugate() == self._long(7) + assert (-self._long(7)).conjugate() == -self._long(7) class L(int): pass @@ -318,10 +328,10 @@ class L(int): def __pos__(self): return 43 - assert L(7).conjugate() == 7 + assert L(7).conjugate() == 
self._long(7) def test_bit_length(self): - assert (8).bit_length() == 4 + assert self._long(8).bit_length() == 4 assert (-1<<40).bit_length() == 41 assert ((2**31)-1).bit_length() == 31 @@ -342,8 +352,8 @@ raises(ValueError, (-5).to_bytes, 1, 'foo') def test_negative_zero(self): - x = eval("-0") - assert x == 0 + x = eval("-self._long(0)") + assert x == self._long(0) def test_long_real(self): class A(int): pass @@ -388,13 +398,10 @@ assert str(e.value) == ( "int() argument must be a string or a number, not 'list'") - def test_coerce(self): - assert 3.__coerce__(4) == (3, 4) - assert 3.__coerce__(4) == (3, 4) - assert 3.__coerce__(object()) == NotImplemented - def test_large_identity(self): import sys a = sys.maxsize + 1 b = sys.maxsize + 2 assert a is not b + b -= 1 + assert a is b diff --git a/pypy/objspace/std/test/test_smalllongobject.py b/pypy/objspace/std/test/test_smalllongobject.py --- a/pypy/objspace/std/test/test_smalllongobject.py +++ b/pypy/objspace/std/test/test_smalllongobject.py @@ -47,24 +47,31 @@ class AppTestSmallLong(test_longobject.AppTestLong): spaceconfig = {"objspace.std.withsmalllong": True} + def setup_class(cls): + from pypy.interpreter import gateway + from pypy.objspace.std.smalllongobject import W_SmallLongObject + def w__long(space, w_obj): + return W_SmallLongObject.fromint(space.int_w(w_obj)) + cls.w__long = cls.space.wrap(gateway.interp2app(w__long)) + def test_sl_simple(self): import __pypy__ - s = __pypy__.internal_repr(5) + s = __pypy__.internal_repr(self._long(5)) assert 'SmallLong' in s def test_sl_hash(self): import __pypy__ - x = 5 + x = self._long(5) assert 'SmallLong' in __pypy__.internal_repr(x) assert hash(5) == hash(x) - biglong = 5 + biglong = self._long(5) biglong ^= 2**100 # hack based on the fact that xor__Long_Long biglong ^= 2**100 # does not call newlong() assert biglong == 5 assert 'SmallLong' not in __pypy__.internal_repr(biglong) assert hash(5) == hash(biglong) # - x = 0x123456789ABCDEF + x = self._long(0x123456789ABCDEF) assert 'SmallLong' in __pypy__.internal_repr(x) biglong = x biglong ^= 2**100 @@ -74,7 +81,7 @@ assert hash(biglong) == hash(x) def test_sl_int(self): - x = 0x123456789ABCDEF + x = self._long(0x123456789ABCDEF) two = 2 assert int(x) == x assert type(int(x)) == type(0x1234567 ** two) @@ -84,21 +91,21 @@ def test_sl_long(self): import __pypy__ - x = int(0) + x = self._long(0) assert 'SmallLong' in __pypy__.internal_repr(x) def test_sl_add(self): import __pypy__ - x = 0x123456789ABCDEF + x = self._long(0x123456789ABCDEF) assert x + x == 0x2468ACF13579BDE assert 'SmallLong' in __pypy__.internal_repr(x + x) - x = -0x123456789ABCDEF + x = self._long(-0x123456789ABCDEF) assert x + x == -0x2468ACF13579BDE assert 'SmallLong' in __pypy__.internal_repr(x + x) - x = 0x723456789ABCDEF0 + x = self._long(0x723456789ABCDEF0) assert x + x == 0xE468ACF13579BDE0 assert 'SmallLong' not in __pypy__.internal_repr(x + x) - x = -0x723456789ABCDEF0 + x = self._long(-0x723456789ABCDEF0) assert x + x == -0xE468ACF13579BDE0 assert 'SmallLong' not in __pypy__.internal_repr(x + x) @@ -113,8 +120,7 @@ assert 'SmallLong' in __pypy__.internal_repr(x - y) def test_sl_lshift(self): - # XXX: was [1, 1L] - for x in [1, 1]: + for x in [1, self._long(1)]: x = 1 assert x << 1 == 2 assert x << 30 == 1073741824 From noreply at buildbot.pypy.org Tue Mar 4 05:29:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Mar 2014 05:29:37 +0100 (CET) Subject: [pypy-commit] pypy py3k: another test Message-ID: 
<20140304042937.710521C3427@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69657:a01d60c8e032 Date: 2014-03-03 20:28 -0800 http://bitbucket.org/pypy/pypy/changeset/a01d60c8e032/ Log: another test diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -241,7 +241,8 @@ for x in ([self._long(i) for i in range(200)] + [self._long(1234567890123456789), 1234567890123456789, 18446743523953737727, - 987685321987685321987685321987685321987685321]): + 987685321987685321987685321987685321987685321, + 10**50]): y = x % modulus assert hash(x) == hash(y) assert hash(-x) == hash(-y) From noreply at buildbot.pypy.org Tue Mar 4 06:24:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 06:24:22 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: make translation happy Message-ID: <20140304052422.66BDB1C042F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69658:13e440892ed6 Date: 2014-03-04 00:02 -0500 http://bitbucket.org/pypy/pypy/changeset/13e440892ed6/ Log: make translation happy diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -102,7 +102,8 @@ raise pw = c_getpwuid(uid) if not pw: - raise oefmt(space.w_KeyError, "%s: %d", msg, uid) + raise OperationError(space.w_KeyError, space.wrap( + "%s: %d" % (msg, uid))) return make_struct_passwd(space, pw) @unwrap_spec(name=str) From noreply at buildbot.pypy.org Tue Mar 4 06:28:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 06:28:46 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: this too Message-ID: <20140304052846.3ADFE1C10A8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69659:595fef85a265 Date: 2014-03-04 00:28 -0500 http://bitbucket.org/pypy/pypy/changeset/595fef85a265/ Log: this too diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from rpython.rlib.rarithmetic import r_uint +from rpython.rlib.rarithmetic import r_uint, widen eci = ExternalCompilationInfo(includes=['pwd.h']) @@ -103,7 +103,7 @@ pw = c_getpwuid(uid) if not pw: raise OperationError(space.w_KeyError, space.wrap( - "%s: %d" % (msg, uid))) + "%s: %d" % (msg, widen(uid)))) return make_struct_passwd(space, pw) @unwrap_spec(name=str) From noreply at buildbot.pypy.org Tue Mar 4 06:30:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Mar 2014 06:30:41 +0100 (CET) Subject: [pypy-commit] pypy py3k: force small long Message-ID: <20140304053041.B91B31C0F86@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69660:b5e7e097d0c0 Date: 2014-03-03 21:29 -0800 http://bitbucket.org/pypy/pypy/changeset/b5e7e097d0c0/ Log: force small long diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -190,9 +190,16 @@ class AppTestSmallLong(AppTestMarshal): spaceconfig = {"objspace.std.withsmalllong": True} + def setup_class(cls): + from pypy.interpreter import gateway + from 
pypy.objspace.std.smalllongobject import W_SmallLongObject + def w__small(space, w_obj): + return W_SmallLongObject.fromint(space.int_w(w_obj)) + cls.w__small = cls.space.wrap(gateway.interp2app(w__small)) + def test_smalllong(self): import __pypy__ - x = -123456789012345 + x = self._small(-123456789012345) assert 'SmallLong' in __pypy__.internal_repr(x) y = self.marshal_check(x) assert y == x From noreply at buildbot.pypy.org Tue Mar 4 09:00:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 09:00:14 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix rawffi Message-ID: <20140304080014.CB3591C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69661:f2b3b189bc4f Date: 2014-03-04 02:03 -0500 http://bitbucket.org/pypy/pypy/changeset/f2b3b189bc4f/ Log: fix rawffi diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -16,7 +16,7 @@ from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint, \ - r_longlong, r_ulonglong + r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi @@ -269,12 +269,10 @@ return x >> 16 def BIT_MASK(x, ll_t): - if ll_t is lltype.SignedLongLong: - one = r_longlong(1) - elif ll_t is lltype.UnsignedLongLong: + if ll_t is lltype.SignedLongLong or ll_t is lltype.UnsignedLongLong: one = r_ulonglong(1) else: - one = 1 + one = r_uint(1) # to avoid left shift by x == sizeof(ll_t) return (((one << (x - 1)) - 1) << 1) + 1 BIT_MASK._annspecialcase_ = 'specialize:arg(1)' @@ -317,8 +315,7 @@ if ll_t is lltype.Bool or signedtype(ll_t._type): sign = (value >> (numbits - 1)) & 1 if sign: - one = r_longlong(1) if ll_t is lltype.SignedLongLong else 1 - value = value - (one << numbits) + value -= bitmask + 1 value = rffi.cast(ll_t, value) break return value From noreply at buildbot.pypy.org Tue Mar 4 09:00:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 09:00:16 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: update _testcapi module, re-enable in test_codecs.py Message-ID: <20140304080016.2B63C1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69662:df7577100226 Date: 2014-03-04 02:44 -0500 http://bitbucket.org/pypy/pypy/changeset/df7577100226/ Log: update _testcapi module, re-enable in test_codecs.py diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py --- a/lib-python/2.7/test/test_codecs.py +++ b/lib-python/2.7/test/test_codecs.py @@ -2,11 +2,7 @@ import unittest import codecs import locale -import sys, StringIO -try: - import _testcapi -except ImportError: - _testcapi = None +import sys, StringIO, _testcapi def coding_checker(self, coder): def check(input, expect): @@ -1529,7 +1525,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders and _testcapi: + if encoding not in broken_incremental_coders: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib_pypy/_testcapimodule.c b/lib_pypy/_testcapimodule.c --- a/lib_pypy/_testcapimodule.c +++ b/lib_pypy/_testcapimodule.c @@ -1119,7 +1119,7 @@ if (!PyArg_ParseTuple(args, "u#|s", &unicode, &length, &errors)) return NULL; - decimal_length = length * 7; /* 
len('€') */ + decimal_length = length * 10; /* len('􏿿') */ decimal = PyBytes_FromStringAndSize(NULL, decimal_length); if (decimal == NULL) return NULL; @@ -1814,7 +1814,7 @@ ; test_structmembers *ob; const char *s = NULL; - Py_ssize_t string_len = 0; + int string_len = 0; ob = PyObject_New(test_structmembers, type); if (ob == NULL) return NULL; From noreply at buildbot.pypy.org Tue Mar 4 09:00:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 09:00:17 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: kill some more stdlib modifications no longer needed Message-ID: <20140304080017.6EE681C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69663:58c31b45df71 Date: 2014-03-04 02:59 -0500 http://bitbucket.org/pypy/pypy/changeset/58c31b45df71/ Log: kill some more stdlib modifications no longer needed diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -82,6 +82,7 @@ import unittest import weakref +import _testcapi from test import test_support @@ -104,9 +105,7 @@ class CodeTest(unittest.TestCase): - @test_support.impl_detail("test for PyCode_NewEmpty") def test_newempty(self): - import _testcapi co = _testcapi.code_newempty("filename", "funcname", 15) self.assertEqual(co.co_filename, "filename") self.assertEqual(co.co_name, "funcname") diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -19,12 +19,9 @@ import re import time import struct +import _testcapi import sysconfig try: - import _testcapi -except ImportError: - _testcapi = None -try: import thread except ImportError: thread = None @@ -975,7 +972,7 @@ # add GC header size if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\ ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))): - size += 1 if _testcapi is None else _testcapi.SIZEOF_PYGC_HEAD + size += _testcapi.SIZEOF_PYGC_HEAD msg = 'wrong size for %s: got %d, expected %d' \ % (type(o), result, size) test.assertEqual(result, size, msg) diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -1,9 +1,6 @@ """Test cases for traceback module""" -try: - from _testcapi import traceback_print -except ImportError: - traceback_print = None +from _testcapi import traceback_print from StringIO import StringIO import sys import unittest @@ -179,8 +176,6 @@ class TracebackFormatTests(unittest.TestCase): def test_traceback_format(self): - if traceback_print is None: - raise unittest.SkipTest('Requires _testcapi') try: raise KeyError('blah') except KeyError: diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py --- a/lib-python/2.7/test/test_unicode.py +++ b/lib-python/2.7/test/test_unicode.py @@ -1636,10 +1636,7 @@ self.assertEqual("{}".format(u), '__unicode__ overridden') def test_encode_decimal(self): - try: - from _testcapi import unicode_encodedecimal - except ImportError: - raise unittest.SkipTest('Requires _testcapi') + from _testcapi import unicode_encodedecimal self.assertEqual(unicode_encodedecimal(u'123'), b'123') self.assertEqual(unicode_encodedecimal(u'\u0663.\u0661\u0664'), From noreply at buildbot.pypy.org Tue Mar 4 09:25:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 09:25:55 +0100 (CET) 
Subject: [pypy-commit] pypy stdlib-2.7.6: update path Message-ID: <20140304082555.55DD71C328C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69664:0e44a8eef86d Date: 2014-03-04 03:22 -0500 http://bitbucket.org/pypy/pypy/changeset/0e44a8eef86d/ Log: update path diff --git a/pypy/module/_rawffi/alt/test/test_ztranslation.py b/pypy/module/_rawffi/alt/test/test_ztranslation.py --- a/pypy/module/_rawffi/alt/test/test_ztranslation.py +++ b/pypy/module/_rawffi/alt/test/test_ztranslation.py @@ -1,4 +1,4 @@ from pypy.objspace.fake.checkmodule import checkmodule def test__ffi_translates(): - checkmodule('_rawffi') + checkmodule('_rawffi.alt') From noreply at buildbot.pypy.org Tue Mar 4 09:46:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 09:46:09 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix rawffi, again Message-ID: <20140304084609.86C5D1C0483@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69665:4b0a917d09f4 Date: 2014-03-04 03:45 -0500 http://bitbucket.org/pypy/pypy/changeset/4b0a917d09f4/ Log: fix rawffi, again diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -289,8 +289,9 @@ if numbits: lowbit = LOW_BIT(bitsize) bitmask = BIT_MASK(numbits, TP) - value = widen(value) - current = widen(read_ptr(ptr, 0, TP)) + masktype = lltype.typeOf(bitmask) + value = rffi.cast(masktype, value) + current = rffi.cast(masktype, read_ptr(ptr, 0, TP)) current &= ~(bitmask << lowbit) current |= (value & bitmask) << lowbit value = rffi.cast(TP, current) @@ -309,7 +310,8 @@ if numbits: lowbit = LOW_BIT(bitsize) bitmask = BIT_MASK(numbits, ll_t) - value = widen(value) + masktype = lltype.typeOf(bitmask) + value = rffi.cast(masktype, value) value >>= lowbit value &= bitmask if ll_t is lltype.Bool or signedtype(ll_t._type): From noreply at buildbot.pypy.org Tue Mar 4 10:32:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 10:32:28 +0100 (CET) Subject: [pypy-commit] stmgc default: Refactor the contention management. Now become_inevitable is also a Message-ID: <20140304093228.710541C31F9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r943:e04a0e585284 Date: 2014-03-04 10:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/e04a0e585284/ Log: Refactor the contention management. Now become_inevitable is also a case of contention (in this case, for the right to be inevitable). Contention managers can choose to pause the running thread too. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -3,80 +3,189 @@ #endif -static void contention_management(uint8_t other_segment_num) +enum contention_kind_e { + + /* A write-write contention occurs when we running our transaction + and detect that we are about to write to an object that another + thread is also writing to. This kind of contention must be + resolved before continuing. This *must* abort one of the two + threads: the caller's thread is not at a safe-point, so cannot + wait! */ + WRITE_WRITE_CONTENTION, + + /* A write-read contention occurs when we are trying to commit: it + means that an object we wrote to was also read by another + transaction. Even though it would seem obvious that we should + just abort the other thread and proceed in our commit, a more + subtle answer would be in some cases to wait for the other thread + to commit first. 
It would commit having read the old value, and + then we can commit our change to it. */ + WRITE_READ_CONTENTION, + + /* An inevitable contention occurs when we're trying to become + inevitable but another thread already is. We can never abort the + other thread in this case, but we still have the choice to abort + ourselves or pause until the other thread commits. */ + INEVITABLE_CONTENTION, +}; + +struct contmgr_s { + enum contention_kind_e kind; + struct stm_priv_segment_info_s *other_pseg; + bool abort_other; + bool try_sleep; // XXX add a way to timeout, but should handle repeated + // calls to contention_management() to avoid re-sleeping + // for the whole duration +}; + + +/************************************************************/ + + +__attribute__((unused)) +static void cm_always_abort_myself(struct contmgr_s *cm) { - /* A simple contention manager. Called when some other thread - holds the write lock on an object. The current thread tries - to do either a write or a read on it. */ + cm->abort_other = false; +} +__attribute__((unused)) +static void cm_always_abort_other(struct contmgr_s *cm) +{ + cm->abort_other = true; +} + +__attribute__((unused)) +static void cm_abort_the_younger(struct contmgr_s *cm) +{ + if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) { + /* We started after the other thread. Abort */ + cm->abort_other = false; + } + else { + cm->abort_other = true; + } +} + +__attribute__((unused)) +static void cm_always_wait_for_other_thread(struct contmgr_s *cm) +{ + cm_abort_the_younger(cm); + cm->try_sleep = true; +} + +__attribute__((unused)) +static void cm_pause_if_younger(struct contmgr_s *cm) +{ + if (STM_PSEGMENT->start_time >= cm->other_pseg->start_time) { + /* We started after the other thread. Pause */ + cm->try_sleep = true; + cm->abort_other = false; + } + else { + cm->abort_other = true; + } +} + + +/************************************************************/ + + +static void contention_management(uint8_t other_segment_num, + enum contention_kind_e kind) +{ assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); - /* Who should abort here: this thread, or the other thread? */ - struct stm_priv_segment_info_s* other_pseg; - other_pseg = get_priv_segment(other_segment_num); - - if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { - /* I'm inevitable, so the other is not. */ - assert(other_pseg->transaction_state != TS_INEVITABLE); - other_pseg->pub.nursery_end = NSE_SIGABORT; - } - else if (other_pseg->start_time <= STM_PSEGMENT->start_time) { - /* The other thread started before us, so I should abort, as I'm - the least long-running transaction. */ - } - else if (other_pseg->transaction_state == TS_REGULAR) { - /* The other thread started strictly after us. We tell it - to abort if we can (e.g. if it's not TS_INEVITABLE). */ - other_pseg->pub.nursery_end = NSE_SIGABORT; - } - - /* Now check what we just did... almost: the check at the following - line can also find a NSE_SIGABORT that was set earlier. - */ - if (other_pseg->pub.nursery_end != NSE_SIGABORT) { - /* if the other thread is not in aborting-soon mode, then *we* - must abort. */ - abort_with_mutex(); - } -} - -static void write_write_contention_management(uintptr_t lock_idx) -{ - s_mutex_lock(); if (must_abort()) abort_with_mutex(); - uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; - if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { + /* Who should abort here: this thread, or the other thread? 
*/ + struct contmgr_s contmgr; + contmgr.kind = kind; + contmgr.other_pseg = get_priv_segment(other_segment_num); + contmgr.abort_other = false; + contmgr.try_sleep = false; - uint8_t other_segment_num = prev_owner - 1; - assert(get_priv_segment(other_segment_num)->write_lock_num == - prev_owner); + /* Pick one contention management... could be made dynamically choosable */ +#ifdef STM_TESTS + cm_abort_the_younger(&contmgr); +#else + cm_always_wait_for_other_thread(&contmgr); +#endif - /* Do generic contention management. If found that we must abort, - calls abort_with_mutex() and never returns. If found that the - other thread must abort, signal it with NSE_SIGABORT. Note that - this can overwrite a NSE_SIGPAUSE, which is fine. */ - contention_management(other_segment_num); - assert(get_segment(other_segment_num)->nursery_end == NSE_SIGABORT); + /* Fix the choices that are found incorrect due to TS_INEVITABLE + or NSE_SIGABORT */ + if (contmgr.other_pseg->pub.nursery_end == NSE_SIGABORT) { + contmgr.abort_other = true; + contmgr.try_sleep = false; + } + else if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + assert(contmgr.other_pseg->transaction_state != TS_INEVITABLE); + contmgr.abort_other = true; + } + else if (contmgr.other_pseg->transaction_state == TS_INEVITABLE) { + contmgr.abort_other = false; + } - /* The rest of this code is for the case where we continue to - run. We have to signal the other thread to abort, and wait - until it does. */ + if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && + contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { + /* Sleep. - int sp = get_priv_segment(other_segment_num)->safe_point; + - Not for write-write contentions, because we're not at a + safe-point. + + - To prevent loops of threads waiting for each others, use + a crude heuristic of never pausing for a thread that is + itself already paused here. + */ + contmgr.other_pseg->signal_when_done = true; + + dprintf(("pausing...\n")); + cond_signal(C_AT_SAFE_POINT); + STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; + cond_wait(C_TRANSACTION_DONE); + STM_PSEGMENT->safe_point = SP_RUNNING; + dprintf(("pausing done\n")); + + if (must_abort()) + abort_with_mutex(); + } + else if (!contmgr.abort_other) { + dprintf(("abort in contention\n")); + abort_with_mutex(); + } + else { + /* We have to signal the other thread to abort, and wait until + it does. */ + contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; + + int sp = contmgr.other_pseg->safe_point; switch (sp) { case SP_RUNNING: /* The other thread is running now, so as NSE_SIGABORT was set in its 'nursery_end', it will soon enter a mutex_lock() and thus abort. + + In this case, we will wait until it broadcasts "I'm done + aborting". Important: this is not a safe point of any + kind! The shadowstack may not be correct here. It + should not end in a deadlock, because the target thread + is, in principle, guaranteed to call abort_with_mutex() + very soon. */ + dprintf(("contention: wait C_ABORTED...\n")); + cond_wait(C_ABORTED); + dprintf(("contention: done\n")); + + if (must_abort()) + abort_with_mutex(); break; /* The other cases are where the other thread is at a safe-point. We wake it up by sending the correct signal. + We don't have to wait here: the other thread will not do + anything more than abort when it really wakes up later. 
*/ case SP_WAIT_FOR_C_REQUEST_REMOVED: cond_broadcast(C_REQUEST_REMOVED); @@ -86,29 +195,45 @@ cond_broadcast(C_AT_SAFE_POINT); break; + case SP_WAIT_FOR_C_TRANSACTION_DONE: + cond_broadcast(C_TRANSACTION_DONE); + break; + #ifdef STM_TESTS case SP_WAIT_FOR_OTHER_THREAD: - /* abort anyway for tests. We can't wait here */ - abort_with_mutex(); + /* for tests: the other thread will abort as soon as + stm_stop_safe_point() is called */ + break; #endif default: stm_fatalerror("unexpected other_pseg->safe_point: %d", sp); } - /* wait, hopefully until the other thread broadcasts "I'm - done aborting" (spurious wake-ups are ok). Important: - this is not a safe point of any kind! The shadowstack - is not correct here. It should not end in a deadlock, - because the target thread is, in principle, guaranteed - to call abort_with_mutex(). - */ - dprintf(("contention: wait C_ABORTED...\n")); - cond_wait(C_ABORTED); - dprintf(("contention: done\n")); + if (is_aborting_now(other_segment_num)) { + /* The other thread is blocked in a safe-point with NSE_SIGABORT. + We don't have to wake it up right now, but we know it will + abort as soon as it wakes up. We can safely force it to + reset its state now. */ + dprintf(("reset other modified\n")); + reset_modified_from_other_segments(other_segment_num); + } + dprintf(("killed other thread\n")); + } +} - if (must_abort()) - abort_with_mutex(); +static void write_write_contention_management(uintptr_t lock_idx) +{ + s_mutex_lock(); + + uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; + if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { + + uint8_t other_segment_num = prev_owner - 1; + assert(get_priv_segment(other_segment_num)->write_lock_num == + prev_owner); + + contention_management(other_segment_num, WRITE_WRITE_CONTENTION); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. */ @@ -116,3 +241,13 @@ s_mutex_unlock(); } + +static void write_read_contention_management(uint8_t other_segment_num) +{ + contention_management(other_segment_num, WRITE_READ_CONTENTION); +} + +static void inevitable_contention_management(uint8_t other_segment_num) +{ + contention_management(other_segment_num, INEVITABLE_CONTENTION); +} diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,3 +1,9 @@ -static void contention_management(uint8_t other_segment_num); static void write_write_contention_management(uintptr_t lock_idx); +static void write_read_contention_management(uint8_t other_segment_num); +static void inevitable_contention_management(uint8_t other_segment_num); + +static inline bool is_aborting_now(uint8_t other_segment_num) { + return (get_segment(other_segment_num)->nursery_end == NSE_SIGABORT && + get_priv_segment(other_segment_num)->safe_point != SP_RUNNING); +} diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -26,7 +26,7 @@ } /* do a read-barrier now. Note that this must occur before the - safepoints that may be issued in contention_management(). */ + safepoints that may be issued in write_write_contention_management(). */ stm_read(obj); /* claim the write-lock for this object. 
In case we're running the @@ -192,14 +192,17 @@ # error "The logic in the functions below only works with two segments" #endif -static void detect_write_read_conflicts(void) +static bool detect_write_read_conflicts(void) { long remote_num = 1 - STM_SEGMENT->segment_num; char *remote_base = get_segment_base(remote_num); uint8_t remote_version = get_segment(remote_num)->transaction_read_version; if (get_priv_segment(remote_num)->transaction_state == TS_NONE) - return; /* no need to check */ + return false; /* no need to check */ + + if (is_aborting_now(remote_num)) + return false; /* no need to check: is pending immediate abort */ LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, @@ -207,14 +210,17 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - contention_management(remote_num); + write_read_contention_management(remote_num); - /* If we reach this point, it means that we would like - the other thread to abort. We're done here. */ - assert(get_segment(remote_num)->nursery_end == NSE_SIGABORT); - return; + /* If we reach this point, we didn't abort, but maybe we + had to wait for the other thread to commit. If we + did, then we have to restart committing from our call + to synchronize_all_threads(). */ + return true; } })); + + return false; } static void synchronize_overflow_object_now(object_t *obj) @@ -335,13 +341,15 @@ s_mutex_lock(); + restart: /* force all other threads to be paused. They will unpause automatically when we are done here, i.e. at mutex_unlock(). Important: we should not call cond_wait() in the meantime. */ synchronize_all_threads(); /* detect conflicts */ - detect_write_read_conflicts(); + if (detect_write_read_conflicts()) + goto restart; /* cannot abort any more from here */ dprintf(("commit_transaction\n")); @@ -386,16 +394,25 @@ abort_with_mutex(); } -static void reset_modified_from_other_segments(void) +static void +reset_modified_from_other_segments(int segment_num) { /* pull the right versions from other threads in order - to reset our pages as part of an abort */ - long remote_num = 1 - STM_SEGMENT->segment_num; - char *local_base = STM_SEGMENT->segment_base; + to reset our pages as part of an abort. + + Note that this function is also sometimes called from + contention.c to clean up the state of a different thread, + when we would really like it to be aborted now and it is + suspended at a safe-point. + + */ + struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); + long remote_num = !segment_num; + char *local_base = get_segment_base(segment_num); char *remote_base = get_segment_base(remote_num); LIST_FOREACH_R( - STM_PSEGMENT->modified_old_objects, + pseg->modified_old_objects, object_t * /*item*/, ({ /* memcpy in the opposite direction than @@ -423,11 +440,11 @@ /* clear the write-lock */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; assert((intptr_t)lock_idx >= 0); - assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); + assert(write_locks[lock_idx] == pseg->write_lock_num); write_locks[lock_idx] = 0; })); - list_clear(STM_PSEGMENT->modified_old_objects); + list_clear(pseg->modified_old_objects); } static void abort_with_mutex(void) @@ -449,7 +466,7 @@ throw_away_nursery(); /* reset all the modified objects (incl. 
re-adding GCFLAG_WRITE_BARRIER) */ - reset_modified_from_other_segments(); + reset_modified_from_other_segments(STM_SEGMENT->segment_num); stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; stm_thread_local_t *tl = STM_SEGMENT->running_thread; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -116,6 +116,9 @@ /* Temp for minor collection */ bool minor_collect_will_commit_now; + /* For sleeping contention management */ + bool signal_when_done; + /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. */ object_t **shadowstack_at_start_of_transaction; @@ -132,6 +135,7 @@ SP_RUNNING, SP_WAIT_FOR_C_REQUEST_REMOVED, SP_WAIT_FOR_C_AT_SAFE_POINT, + SP_WAIT_FOR_C_TRANSACTION_DONE, #ifdef STM_TESTS SP_WAIT_FOR_OTHER_THREAD, #endif diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -39,7 +39,7 @@ __sync_lock_release(&pages_ctl.mutex_pages); } -static bool _has_mutex_pages(void) __attribute__((unused)); +__attribute__((unused)) static bool _has_mutex_pages(void) { return pages_ctl.mutex_pages != 0; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -170,7 +170,7 @@ s_mutex_unlock(); } -static bool _is_tl_registered(stm_thread_local_t *tl) __attribute__((unused)); +__attribute__((unused)) static bool _is_tl_registered(stm_thread_local_t *tl) { return tl->next != NULL; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -127,14 +127,18 @@ for (i = 0; i < NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { if (can_abort) { - /* for now, always abort if we can. We could also try - sometimes to wait for the other thread (needs to - take care about setting safe_point then) */ - abort_with_mutex(); + /* handle this case like a contention: it will either + abort us (not the other thread, which is inevitable), + or for a while. If we go past this call, then we + waited; in this case we have to re-check if no other + thread is inevitable. 
*/ + inevitable_contention_management(i); } - /* wait for stm_commit_transaction() to finish this - inevitable transaction */ - cond_wait(C_INEVITABLE); + else { + /* wait for stm_commit_transaction() to finish this + inevitable transaction */ + cond_wait(C_INEVITABLE); + } goto restart; } } @@ -192,17 +196,23 @@ { assert(_has_mutex()); + /* wake up one of the threads waiting in acquire_thread_segment() */ + cond_signal(C_SEGMENT_FREE); + + /* if contention management asked for it, broadcast this thread's end */ + if (STM_PSEGMENT->signal_when_done) { + cond_broadcast(C_TRANSACTION_DONE); + STM_PSEGMENT->signal_when_done = false; + } + assert(STM_SEGMENT->running_thread == tl); STM_SEGMENT->running_thread = NULL; assert(sync_ctl.in_use[tl->associated_segment_num] == 1); sync_ctl.in_use[tl->associated_segment_num] = 0; - - /* wake up one of the threads waiting in acquire_thread_segment() */ - cond_signal(C_SEGMENT_FREE); } -static bool _running_transaction(void) __attribute__((unused)); +__attribute__((unused)) static bool _running_transaction(void) { return (STM_SEGMENT->running_thread != NULL); diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -10,6 +10,7 @@ C_REQUEST_REMOVED, C_INEVITABLE, C_ABORTED, + C_TRANSACTION_DONE, _C_TOTAL }; static void s_mutex_lock(void); diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -47,8 +47,6 @@ our_trs.set_must_abort(objs_in_conflict) elif wait: assert not our_trs.inevitable - # abort anyway: - our_trs.set_must_abort(objs_in_conflict) class TransactionState(object): From noreply at buildbot.pypy.org Tue Mar 4 10:34:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 10:34:59 +0100 (CET) Subject: [pypy-commit] stmgc default: Clarify this assert Message-ID: <20140304093459.5542E1C31F9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r944:164016b917bd Date: 2014-03-04 10:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/164016b917bd/ Log: Clarify this assert diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -109,9 +109,9 @@ still have the flag */ long i; for (i = 0; i < NB_SEGMENTS; i++) { - assert(i == STM_SEGMENT->segment_num || - (((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) - ->stm_flags & GCFLAG_WRITE_BARRIER)); + if (i != STM_SEGMENT->segment_num) + assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) + ->stm_flags & GCFLAG_WRITE_BARRIER); } } From noreply at buildbot.pypy.org Tue Mar 4 11:12:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 11:12:06 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: handle eintr in _io module Message-ID: <20140304101206.A88E81C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69666:0c806895b514 Date: 2014-03-04 05:01 -0500 http://bitbucket.org/pypy/pypy/changeset/0c806895b514/ Log: handle eintr in _io module diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -10,26 +10,13 @@ from rpython.rlib import rposix from rpython.tool.sourcetools import func_renamer from pypy.module._io.interp_iobase import ( - W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, + W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, trap_eintr, check_readable_w, check_writable_w, check_seekable_w) from pypy.module._io.interp_io import W_BlockingIOError from 
rpython.rlib import rthread -import errno STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) -def trap_eintr(space, error): - # Return True if an EnvironmentError with errno == EINTR is set - if not error.match(space, space.w_EnvironmentError): - return False - try: - w_value = error.get_w_value(space) - w_errno = space.getattr(w_value, space.wrap("errno")) - return space.is_true( - space.eq(w_errno, space.wrap(errno.EINTR))) - except OperationError: - return False - def make_write_blocking_error(space, written): w_type = space.gettypeobject(W_BlockingIOError.typedef) @@ -58,7 +45,7 @@ raise self.operr self.lock.acquire(True) self.owner = rthread.get_ident() - + def __exit__(self,*args): self.owner = 0 self.lock.release() diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -390,10 +390,13 @@ try: chunk = os.read(self.fd, newsize - total) except OSError, e: + if e.errno == errno.EINTR: + space.getexecutioncontext().checksignals() + continue + if total > 0: + # return what we've got so far + break if e.errno == errno.EAGAIN: - if total > 0: - # return what we've got so far - break return space.w_None raise wrap_oserror(space, e, exception_name='w_IOError') diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -1,3 +1,4 @@ +from errno import EINTR from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import ( @@ -16,6 +17,18 @@ else: return space.int_w(w_size) +def trap_eintr(space, error): + # Return True if an EnvironmentError with errno == EINTR is set + if not error.match(space, space.w_EnvironmentError): + return False + try: + w_value = error.get_w_value(space) + w_errno = space.getattr(w_value, space.wrap("errno")) + return space.is_true( + space.eq(w_errno, space.wrap(EINTR))) + except OperationError: + return False + def unsupported(space, message): w_exc = space.getattr(space.getbuiltinmodule('_io'), space.wrap('UnsupportedOperation')) @@ -178,7 +191,12 @@ nreadahead = 1 if has_peek: - w_readahead = space.call_method(self, "peek", space.wrap(1)) + try: + w_readahead = space.call_method(self, "peek", space.wrap(1)) + except OperationError, e: + if trap_eintr(space, e): + continue + raise if not space.isinstance_w(w_readahead, space.w_str): raise oefmt(space.w_IOError, "peek() should have returned a bytes object, " @@ -203,7 +221,12 @@ break nreadahead = n - w_read = space.call_method(self, "read", space.wrap(nreadahead)) + try: + w_read = space.call_method(self, "read", space.wrap(nreadahead)) + except OperationError, e: + if trap_eintr(space, e): + continue + raise if not space.isinstance_w(w_read, space.w_str): raise oefmt(space.w_IOError, "peek() should have returned a bytes object, not " @@ -254,7 +277,15 @@ if not e.match(space, space.w_StopIteration): raise break # done - space.call_method(self, "write", w_line) + while True: + try: + space.call_method(self, "write", w_line) + except OperationError, e: + if trap_eintr(space, e): + continue + raise + else: + break W_IOBase.typedef = TypeDef( '_IOBase', @@ -306,8 +337,13 @@ def readall_w(self, space): builder = StringBuilder() while True: - w_data = space.call_method(self, "read", - space.wrap(DEFAULT_BUFFER_SIZE)) + try: + w_data = space.call_method(self, "read", + space.wrap(DEFAULT_BUFFER_SIZE)) + except OperationError, e: + if 
trap_eintr(space, e): + continue + raise if space.is_w(w_data, space.w_None): if not builder.getlength(): return w_data diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -7,7 +7,7 @@ GetSetProperty, TypeDef, generic_new_descr, interp_attrproperty, interp_attrproperty_w) from pypy.module._codecs import interp_codecs -from pypy.module._io.interp_iobase import W_IOBase, convert_size +from pypy.module._io.interp_iobase import W_IOBase, convert_size, trap_eintr from rpython.rlib.rarithmetic import intmask, r_uint, r_ulonglong from rpython.rlib.rbigint import rbigint from rpython.rlib.rstring import UnicodeBuilder @@ -614,9 +614,14 @@ if remaining <= 0: # Done break - if not self._read_chunk(space): - # EOF - break + try: + if not self._read_chunk(space): + # EOF + break + except OperationError, e: + if trap_eintr(space, e): + continue + raise return space.wrap(builder.build()) @@ -635,9 +640,14 @@ # First, get some data if necessary has_data = True while not self.decoded_chars: - if not self._read_chunk(space): - has_data = False - break + try: + if not self._read_chunk(space): + has_data = False + break + except OperationError, e: + if trap_eintr(space, e): + continue + raise if not has_data: # end of file self._set_decoded_chars(None) @@ -772,7 +782,15 @@ self.pending_bytes = None self.pending_bytes_count = 0 - space.call_method(self.w_buffer, "write", space.wrap(pending_bytes)) + while True: + try: + space.call_method(self.w_buffer, "write", space.wrap(pending_bytes)) + except OperationError, e: + if trap_eintr(space, e): + continue + raise + else: + break def detach_w(self, space): self._check_init(space) From noreply at buildbot.pypy.org Tue Mar 4 11:12:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 11:12:07 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: try to make this test more robust Message-ID: <20140304101207.E8C021C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69667:84d26c43b3d2 Date: 2014-03-04 05:11 -0500 http://bitbucket.org/pypy/pypy/changeset/84d26c43b3d2/ Log: try to make this test more robust diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -271,7 +271,8 @@ t = threading.Thread(target=pollster.poll) t.start() try: - time.sleep(0.5); print '', # print to release GIL untranslated + for i in range(5): + time.sleep(0.1); print '', # print to release GIL untranslated # trigger ufds array reallocation for fd in rfds: pollster.unregister(fd) From noreply at buildbot.pypy.org Tue Mar 4 17:56:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 17:56:04 +0100 (CET) Subject: [pypy-commit] stmgc default: Add an assert Message-ID: <20140304165604.3FE661C315E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r945:5f8edec122fe Date: 2014-03-04 17:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/5f8edec122fe/ Log: Add an assert diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -227,6 +227,7 @@ { assert(!_is_young(obj)); assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); From noreply at buildbot.pypy.org Tue Mar 4 
17:56:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 17:56:05 +0100 (CET) Subject: [pypy-commit] stmgc default: Small refactoring to avoid the step that clears the write locks first. Message-ID: <20140304165605.510511C315E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r946:732edc0f0e90 Date: 2014-03-04 17:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/732edc0f0e90/ Log: Small refactoring to avoid the step that clears the write locks first. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -145,7 +145,7 @@ static struct list_s *mark_objects_to_trace; -#define WL_VISITED 42 +#define WL_VISITED 255 static inline uintptr_t mark_loc(object_t *obj) @@ -159,15 +159,13 @@ static inline bool mark_visited_test(object_t *obj) { uintptr_t lock_idx = mark_loc(obj); - assert(write_locks[lock_idx] == 0 || write_locks[lock_idx] == WL_VISITED); - return write_locks[lock_idx] != 0; + return write_locks[lock_idx] == WL_VISITED; } static inline bool mark_visited_test_and_set(object_t *obj) { uintptr_t lock_idx = mark_loc(obj); - assert(write_locks[lock_idx] == 0 || write_locks[lock_idx] == WL_VISITED); - if (write_locks[lock_idx] != 0) { + if (write_locks[lock_idx] == WL_VISITED) { return true; } else { @@ -179,8 +177,7 @@ static inline bool mark_visited_test_and_clear(object_t *obj) { uintptr_t lock_idx = mark_loc(obj); - assert(write_locks[lock_idx] == 0 || write_locks[lock_idx] == WL_VISITED); - if (write_locks[lock_idx] != 0) { + if (write_locks[lock_idx] == WL_VISITED) { write_locks[lock_idx] = 0; return true; } @@ -492,25 +489,6 @@ memset(write_locks + lock2_idx, 0, sizeof(write_locks) - lock2_idx); } -static void major_clear_write_locks(void) -{ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - struct stm_priv_segment_info_s *pseg = get_priv_segment(i); - - LIST_FOREACH_R( - pseg->modified_old_objects, - object_t * /*item*/, - ({ - assert(item != NULL); - - uintptr_t lock_idx = mark_loc(item); - assert(write_locks[lock_idx] == pseg->write_lock_num); - write_locks[lock_idx] = 0; - })); - } -} - static void major_set_write_locks(void) { /* restore the write locks on the modified objects */ @@ -541,8 +519,6 @@ dprintf((" | used before collection: %ld\n", (long)pages_ctl.total_allocated)); - major_clear_write_locks(); - /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); From noreply at buildbot.pypy.org Tue Mar 4 17:58:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 17:58:47 +0100 (CET) Subject: [pypy-commit] stmgc default: This belongs together with 732edc0f0e90 Message-ID: <20140304165847.185D01C317E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r947:7bc0bad288b5 Date: 2014-03-04 17:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/7bc0bad288b5/ Log: This belongs together with 732edc0f0e90 diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -49,7 +49,7 @@ PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(i + 1 <= 255); + assert(i + 1 < 255); /* 255 is WL_VISITED in gcpage.c */ pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; From noreply at buildbot.pypy.org Tue Mar 4 18:52:14 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:14 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: start small Message-ID: <20140304175214.80A821C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: 
simple-range-strategy Changeset: r69668:afa678215f7a Date: 2014-03-03 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/afa678215f7a/ Log: start small diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -999,7 +999,19 @@ self.sizehint = hint -class RangeListStrategy(ListStrategy): +class BaseRangeListStrategy(ListStrategy): + pass + + +class SimpleRangeListStrategy(BaseRangeListStrategy): + """SimpleRangeListStrategy is used when a list is created using the range + method providing only positive length. The storage is a positive integer + less than 2**31 - 1 storing length.""" + + _applevel_repr = "simple_range" + + +class RangeListStrategy(BaseRangeListStrategy): """RangeListStrategy is used when a list is created using the range method. The storage is a tuple containing only three integers start, step and length and elements are calculated based on these values. On any operation From noreply at buildbot.pypy.org Tue Mar 4 18:52:20 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:20 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: use tuple for storage Message-ID: <20140304175220.6AE551C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69673:16d6e5eb5c28 Date: 2014-03-04 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/16d6e5eb5c28/ Log: use tuple for storage diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -45,7 +45,7 @@ storage = strategy.erase(None) elif start == 0 and step == 1 and length <= 2 ** 31 - 1: strategy = space.fromcache(SimpleRangeListStrategy) - storage = strategy.erase(length) + storage = strategy.erase((length,)) else: strategy = space.fromcache(RangeListStrategy) storage = strategy.erase((start, step, length)) @@ -1041,7 +1041,7 @@ return self._getitems_range(w_list, True) def getstorage_copy(self, w_list): - # tuple/int is immutable + # tuple is immutable return w_list.lstorage @jit.dont_look_inside @@ -1094,8 +1094,8 @@ class SimpleRangeListStrategy(BaseRangeListStrategy): """SimpleRangeListStrategy is used when a list is created using the range - method providing only positive length. The storage is a positive integer - less than 2**31 - 1 storing length.""" + method providing only positive length. 
The storage is a one element tuple + with positive integer less than 2**31 - 1 storing length.""" _applevel_repr = "simple_range" @@ -1106,7 +1106,7 @@ def find(self, w_list, w_obj, startindex, stopindex): if type(w_obj) is W_IntObject: obj = self.unwrap(w_obj) - length = self.unerase(w_list.lstorage) + length = self.unerase(w_list.lstorage)[0] if 0 <= obj < length and startindex <= obj < stopindex: return obj else: @@ -1114,7 +1114,7 @@ return ListStrategy.find(self, w_list, w_obj, startindex, stopindex) def length(self, w_list): - return self.unerase(w_list.lstorage) + return self.unerase(w_list.lstorage)[0] def _getitem_unwrapped(self, w_list, i): length = self.unerase(w_list.lstorage) @@ -1125,7 +1125,7 @@ @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): - length = self.unerase(w_list.lstorage) + length = self.unerase(w_list.lstorage)[0] if wrap_items: r = [None] * length else: @@ -1144,9 +1144,9 @@ func_with_new_name(_getitems_range, "_getitems_range_unroll")) def pop_end(self, w_list): - length_m1 = self.unerase(w_list.lstorage) - 1 + length_m1 = self.unerase(w_list.lstorage)[0] - 1 w_result = self.wrap(length_m1) - w_list.lstorage = self.erase(length_m1) + w_list.lstorage = self.erase((length_m1,)) return w_result def pop(self, w_list, index): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -418,7 +418,6 @@ assert isinstance(l.strategy, SimpleRangeListStrategy) v = l.pop(0) assert self.space.eq_w(v, self.space.wrap(0)) - # XXX promote to RangeListStrategy assert isinstance(l.strategy, IntegerListStrategy) l = make_range_list(self.space, 0, 1, 10) From noreply at buildbot.pypy.org Tue Mar 4 18:52:15 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:15 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: progress Message-ID: <20140304175215.AC66C1C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69669:8e13e6806176 Date: 2014-03-03 19:43 +0100 http://bitbucket.org/pypy/pypy/changeset/8e13e6806176/ Log: progress diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1000,26 +1000,6 @@ class BaseRangeListStrategy(ListStrategy): - pass - - -class SimpleRangeListStrategy(BaseRangeListStrategy): - """SimpleRangeListStrategy is used when a list is created using the range - method providing only positive length. The storage is a positive integer - less than 2**31 - 1 storing length.""" - - _applevel_repr = "simple_range" - - -class RangeListStrategy(BaseRangeListStrategy): - """RangeListStrategy is used when a list is created using the range method. - The storage is a tuple containing only three integers start, step and - length and elements are calculated based on these values. 
On any operation - destroying the range (inserting, appending non-ints) the strategy is - switched to IntegerListStrategy.""" - - _applevel_repr = "range" - def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -1034,12 +1014,8 @@ def init_from_list_w(self, w_list, list_w): raise NotImplementedError - erase, unerase = rerased.new_erasing_pair("range") - erase = staticmethod(erase) - unerase = staticmethod(unerase) - def clone(self, w_list): - storage = w_list.lstorage # lstorage is tuple, no need to clone + storage = w_list.lstorage # lstorage is tuple/int, no need to clone w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, self) return w_clone @@ -1052,6 +1028,65 @@ w_other.strategy = self w_other.lstorage = w_list.lstorage + def getitem(self, w_list, i): + return self.wrap(self._getitem_unwrapped(w_list, i)) + + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + + def getitems_copy(self, w_list): + return self._getitems_range(w_list, True) + + def getstorage_copy(self, w_list): + # tuple/int is immutable + return w_list.lstorage + + +class SimpleRangeListStrategy(BaseRangeListStrategy): + """SimpleRangeListStrategy is used when a list is created using the range + method providing only positive length. The storage is a positive integer + less than 2**31 - 1 storing length.""" + + _applevel_repr = "simple_range" + + erase, unerase = rerased.new_erasing_pair("simple_range") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def find(self, w_list, w_obj, startindex, stopindex): + if type(w_obj) is W_IntObject: + obj = self.unwrap(w_obj) + length = self.unerase(w_list.lstorage) + if 0 <= obj < length and startindex <= obj < stopindex: + return obj + else: + raise ValueError + return ListStrategy.find(self, w_list, w_obj, startindex, stopindex) + + def length(self, w_list): + return self.unerase(w_list.lstorage) + + def _getitem_unwrapped(self, w_list, i): + length = self.unerase(w_list.lstorage) + if 0 <= i < length: + return i + else: + raise IndexError + + +class RangeListStrategy(BaseRangeListStrategy): + """RangeListStrategy is used when a list is created using the range method. + The storage is a tuple containing only three integers start, step and + length and elements are calculated based on these values. 
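
The idea behind the new strategy can also be illustrated outside of PyPy. The following is a stand-alone sketch written only for illustration (the class SimpleRangeList and its methods are invented here and are not part of the listobject.py code in the diffs): a list created by range(n) remembers only the positive length n, answers read-only operations arithmetically, and materializes a real list of integers the first time an operation breaks the 0..n-1 pattern, which corresponds to switch_to_integer_strategy() above.

    # Illustrative sketch, not the PyPy implementation.
    class SimpleRangeList(object):
        def __init__(self, length):
            assert length > 0
            self._length = length   # the only storage, like erase((length,))
            self._items = None      # filled in once the range is "broken"

        def _force(self):
            # build a real list of ints (switch_to_integer_strategy)
            if self._items is None:
                self._items = list(range(self._length))
            return self._items

        def __len__(self):
            if self._items is None:
                return self._length
            return len(self._items)

        def __getitem__(self, i):
            # negative indices omitted for brevity
            if self._items is None:
                if 0 <= i < self._length:
                    return i        # the item is equal to its index
                raise IndexError(i)
            return self._items[i]

        def index(self, value):
            if self._items is None:
                if 0 <= value < self._length:
                    return value    # find() without touching memory
                raise ValueError(value)
            return self._items.index(value)

        def append(self, value):
            self._force().append(value)

For example, SimpleRangeList(10)[7] is computed as 7 and len() as 10 without allocating ten integer objects; the first append() call builds the real list and all later operations fall back to it.
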
On any operation + destroying the range (inserting, appending non-ints) the strategy is + switched to IntegerListStrategy.""" + + _applevel_repr = "range" + + erase, unerase = rerased.new_erasing_pair("range") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + def find(self, w_list, w_obj, startindex, stopindex): if type(w_obj) is W_IntObject: obj = self.unwrap(w_obj) @@ -1084,19 +1119,6 @@ raise IndexError return start + i * step - def getitems_int(self, w_list): - return self._getitems_range(w_list, False) - - def getitem(self, w_list, i): - return self.wrap(self._getitem_unwrapped(w_list, i)) - - def getitems_copy(self, w_list): - return self._getitems_range(w_list, True) - - def getstorage_copy(self, w_list): - # tuple is unmutable - return w_list.lstorage - @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.unerase(w_list.lstorage) From noreply at buildbot.pypy.org Tue Mar 4 18:52:21 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:21 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: some asserts for annotator Message-ID: <20140304175221.A97E91C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69674:b80212121437 Date: 2014-03-04 11:28 +0100 http://bitbucket.org/pypy/pypy/changeset/b80212121437/ Log: some asserts for annotator diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -43,8 +43,9 @@ if length <= 0: strategy = space.fromcache(EmptyListStrategy) storage = strategy.erase(None) - elif start == 0 and step == 1 and length <= 2 ** 31 - 1: + elif start == 0 and step == 1: strategy = space.fromcache(SimpleRangeListStrategy) + assert length > 0 storage = strategy.erase((length,)) else: strategy = space.fromcache(RangeListStrategy) @@ -1146,11 +1147,11 @@ def pop_end(self, w_list): length_m1 = self.unerase(w_list.lstorage)[0] - 1 w_result = self.wrap(length_m1) + assert length_m1 > 0 w_list.lstorage = self.erase((length_m1,)) return w_result def pop(self, w_list, index): - # XXX could be promoted to RangeListStrategy self.switch_to_integer_strategy(w_list) return w_list.pop(index) From noreply at buildbot.pypy.org Tue Mar 4 18:52:16 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:16 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: finish spliting strategy methods Message-ID: <20140304175216.DA3291C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69670:0fe74ad8f602 Date: 2014-03-03 20:51 +0100 http://bitbucket.org/pypy/pypy/changeset/0fe74ad8f602/ Log: finish spliting strategy methods diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1041,6 +1041,52 @@ # tuple/int is immutable return w_list.lstorage + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self._getitems_range_unroll(w_list, True) + + def getitems_unroll(self, w_list): + return self._getitems_range_unroll(w_list, True) + + def getslice(self, w_list, start, stop, step, length): + self.switch_to_integer_strategy(w_list) + return w_list.getslice(start, stop, step, length) + + def append(self, w_list, w_item): + if type(w_item) is W_IntObject: + self.switch_to_integer_strategy(w_list) + else: + w_list.switch_to_object_strategy() + w_list.append(w_item) + + def inplace_mul(self, w_list, times): 
+ self.switch_to_integer_strategy(w_list) + w_list.inplace_mul(times) + + def deleteslice(self, w_list, start, step, slicelength): + self.switch_to_integer_strategy(w_list) + w_list.deleteslice(start, step, slicelength) + + def setitem(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.setitem(index, w_item) + + def setslice(self, w_list, start, step, slicelength, sequence_w): + self.switch_to_integer_strategy(w_list) + w_list.setslice(start, step, slicelength, sequence_w) + + def insert(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.insert(index, w_item) + + def extend(self, w_list, w_any): + self.switch_to_integer_strategy(w_list) + w_list.extend(w_any) + + def reverse(self, w_list): + self.switch_to_integer_strategy(w_list) + w_list.reverse() + class SimpleRangeListStrategy(BaseRangeListStrategy): """SimpleRangeListStrategy is used when a list is created using the range @@ -1073,6 +1119,42 @@ else: raise IndexError + @specialize.arg(2) + def _getitems_range(self, w_list, wrap_items): + length = self.unerase(w_list.lstorage) + if wrap_items: + r = [None] * length + else: + r = [0] * length + i = 0 + while i < length: + if wrap_items: + r[n] = self.wrap(i) + else: + r[n] = i + i += 1 + + return r + + _getitems_range_unroll = jit.unroll_safe( + func_with_new_name(_getitems_range, "_getitems_range_unroll")) + + def pop_end(self, w_list): + length_m1 = self.unerase(w_list.lstorage) - 1 + w_result = self.wrap(length_m1) + w_list.lstorage = self.erase(length_m1) + return w_result + + def pop(self, w_list, index): + # XXX could be promoted to RangeListStrategy + self.switch_to_integer_strategy(w_list) + return w_list.pop(index) + + def sort(self, w_list, reverse): + if reverse: + self.switch_to_integer_strategy(w_list) + w_list.sort(reverse) + class RangeListStrategy(BaseRangeListStrategy): """RangeListStrategy is used when a list is created using the range method. 
@@ -1141,34 +1223,9 @@ return r - @jit.dont_look_inside - def getitems_fixedsize(self, w_list): - return self._getitems_range_unroll(w_list, True) - - def getitems_unroll(self, w_list): - return self._getitems_range_unroll(w_list, True) _getitems_range_unroll = jit.unroll_safe( func_with_new_name(_getitems_range, "_getitems_range_unroll")) - def getslice(self, w_list, start, stop, step, length): - self.switch_to_integer_strategy(w_list) - return w_list.getslice(start, stop, step, length) - - def append(self, w_list, w_item): - if type(w_item) is W_IntObject: - self.switch_to_integer_strategy(w_list) - else: - w_list.switch_to_object_strategy() - w_list.append(w_item) - - def inplace_mul(self, w_list, times): - self.switch_to_integer_strategy(w_list) - w_list.inplace_mul(times) - - def deleteslice(self, w_list, start, step, slicelength): - self.switch_to_integer_strategy(w_list) - w_list.deleteslice(start, step, slicelength) - def pop_end(self, w_list): start, step, length = self.unerase(w_list.lstorage) w_result = self.wrap(start + (length - 1) * step) @@ -1192,32 +1249,12 @@ self.switch_to_integer_strategy(w_list) return w_list.pop(index) - def setitem(self, w_list, index, w_item): - self.switch_to_integer_strategy(w_list) - w_list.setitem(index, w_item) - - def setslice(self, w_list, start, step, slicelength, sequence_w): - self.switch_to_integer_strategy(w_list) - w_list.setslice(start, step, slicelength, sequence_w) - def sort(self, w_list, reverse): step = self.unerase(w_list.lstorage)[1] if step > 0 and reverse or step < 0 and not reverse: self.switch_to_integer_strategy(w_list) w_list.sort(reverse) - def insert(self, w_list, index, w_item): - self.switch_to_integer_strategy(w_list) - w_list.insert(index, w_item) - - def extend(self, w_list, w_any): - self.switch_to_integer_strategy(w_list) - w_list.extend(w_any) - - def reverse(self, w_list): - self.switch_to_integer_strategy(w_list) - w_list.reverse() - class AbstractUnwrappedStrategy(object): From noreply at buildbot.pypy.org Tue Mar 4 18:52:22 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:22 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: more tests Message-ID: <20140304175222.D3DCC1C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69675:1c5083f4b9a5 Date: 2014-03-04 11:49 +0100 http://bitbucket.org/pypy/pypy/changeset/1c5083f4b9a5/ Log: more tests diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -439,6 +439,29 @@ l.append(self.space.wrap(19)) assert isinstance(l.strategy, IntegerListStrategy) + l = make_range_list(self.space, 0,1,5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + assert l.find(self.space.wrap(0)) == 0 + assert l.find(self.space.wrap(4)) == 4 + + try: + l.find(self.space.wrap(5)) + except ValueError: + pass + else: + assert False, "Did not raise ValueError" + + try: + l.find(self.space.wrap(0), 5, 6) + except ValueError: + pass + else: + assert False, "Did not raise ValueError" + + assert l.length() == 5 + + + def test_keep_range(self): # simple list From noreply at buildbot.pypy.org Tue Mar 4 18:52:24 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:24 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: move sort to base class Message-ID: <20140304175224.0F6DE1C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky 
Branch: simple-range-strategy Changeset: r69676:1e70d4d7d970 Date: 2014-03-04 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/1e70d4d7d970/ Log: move sort to base class diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1088,15 +1088,20 @@ w_list.extend(w_any) def reverse(self, w_list): - # XXX this could be specialized for SimpleRange to promote to Range self.switch_to_integer_strategy(w_list) w_list.reverse() + def sort(self, w_list, reverse): + step = self.step(w_list) + if step > 0 and reverse or step < 0 and not reverse: + self.switch_to_integer_strategy(w_list) + w_list.sort(reverse) + class SimpleRangeListStrategy(BaseRangeListStrategy): """SimpleRangeListStrategy is used when a list is created using the range method providing only positive length. The storage is a one element tuple - with positive integer less than 2**31 - 1 storing length.""" + with positive integer storing length.""" _applevel_repr = "simple_range" @@ -1117,6 +1122,9 @@ def length(self, w_list): return self.unerase(w_list.lstorage)[0] + def step(self, w_list): + return 1 + def _getitem_unwrapped(self, w_list, i): length = self.unerase(w_list.lstorage) if 0 <= i < length: @@ -1155,11 +1163,6 @@ self.switch_to_integer_strategy(w_list) return w_list.pop(index) - def sort(self, w_list, reverse): - if reverse: - self.switch_to_integer_strategy(w_list) - w_list.sort(reverse) - class RangeListStrategy(BaseRangeListStrategy): """RangeListStrategy is used when a list is created using the range method. @@ -1193,6 +1196,9 @@ def length(self, w_list): return self.unerase(w_list.lstorage)[2] + def step(self, w_list): + return self.unerase(w_list.lstorage)[1] + def _getitem_unwrapped(self, w_list, i): v = self.unerase(w_list.lstorage) start = v[0] @@ -1254,12 +1260,6 @@ self.switch_to_integer_strategy(w_list) return w_list.pop(index) - def sort(self, w_list, reverse): - step = self.unerase(w_list.lstorage)[1] - if step > 0 and reverse or step < 0 and not reverse: - self.switch_to_integer_strategy(w_list) - w_list.sort(reverse) - class AbstractUnwrappedStrategy(object): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -460,8 +460,12 @@ assert l.length() == 5 + l = make_range_list(self.space, 0, 1, 1) + assert self.space.eq_w(l.pop(0), self.space.wrap(0)) - + l = make_range_list(self.space, 0, 1, 10) + l.sort(False) + assert isinstance(l.strategy, SimpleRangeListStrategy) def test_keep_range(self): # simple list From noreply at buildbot.pypy.org Tue Mar 4 18:52:18 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:18 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: make range now uses SimpeRangeListStrategy Message-ID: <20140304175218.0F0711C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69671:8d005ad2deff Date: 2014-03-03 21:02 +0100 http://bitbucket.org/pypy/pypy/changeset/8d005ad2deff/ Log: make range now uses SimpeRangeListStrategy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -43,6 +43,9 @@ if length <= 0: strategy = space.fromcache(EmptyListStrategy) storage = strategy.erase(None) + elif start == 0 and step == 1 and length <= 2 ** 31 - 1: + strategy = 
space.fromcache(SimpleRangeListStrategy) + storage = strategy.erase(length) else: strategy = space.fromcache(RangeListStrategy) storage = strategy.erase((start, step, length)) @@ -1084,6 +1087,7 @@ w_list.extend(w_any) def reverse(self, w_list): + # XXX this could be specialized for SimpleRange to promote to Range self.switch_to_integer_strategy(w_list) w_list.reverse() @@ -1129,9 +1133,9 @@ i = 0 while i < length: if wrap_items: - r[n] = self.wrap(i) + r[i] = self.wrap(i) else: - r[n] = i + r[i] = i i += 1 return r From noreply at buildbot.pypy.org Tue Mar 4 18:52:25 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:25 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: oops Message-ID: <20140304175225.365741C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69677:8d186d3ee7de Date: 2014-03-04 14:08 +0100 http://bitbucket.org/pypy/pypy/changeset/8d186d3ee7de/ Log: oops diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1126,7 +1126,8 @@ return 1 def _getitem_unwrapped(self, w_list, i): - length = self.unerase(w_list.lstorage) + length = self.unerase(w_list.lstorage)[0] + assert length > 0 if 0 <= i < length: return i else: From noreply at buildbot.pypy.org Tue Mar 4 18:52:19 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:19 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: fix tests and add more Message-ID: <20140304175219.3FABA1C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69672:dd78cd5b33b4 Date: 2014-03-03 22:11 +0100 http://bitbucket.org/pypy/pypy/changeset/dd78cd5b33b4/ Log: fix tests and add more diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1616,7 +1616,7 @@ _base_extend_from_list = _extend_from_list def _extend_from_list(self, w_list, w_other): - if w_other.strategy is self.space.fromcache(RangeListStrategy): + if isinstance(w_other.strategy, BaseRangeListStrategy): l = self.unerase(w_list.lstorage) other = w_other.getitems_int() assert other is not None diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,8 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import ( + W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, + FloatListStrategy, BytesListStrategy, RangeListStrategy, + SimpleRangeListStrategy, make_range_list, UnicodeListStrategy) from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -18,7 +21,7 @@ UnicodeListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, ObjectListStrategy) # mixed unicode and bytes - + def test_empty_to_any(self): space = self.space w = space.wrap @@ -183,7 +186,7 @@ def test_setslice(self): space = self.space w = space.wrap - + l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.setslice(0, 1, 2, W_ListObject(space, [w(1), w(2), w(3)])) @@ -286,7 +289,7 @@ def 
test_empty_setslice_with_objectlist(self): space = self.space w = space.wrap - + l = W_ListObject(space, []) o = W_ListObject(space, [space.wrap(1), space.wrap("2"), space.wrap(3)]) l.setslice(0, 1, o.length(), o) @@ -347,6 +350,13 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) + r = make_range_list(space, 0, 1, 10) + empty.extend(r) + assert isinstance(empty.strategy, SimpleRangeListStrategy) + assert space.is_true(space.eq(empty.getitem(1), w(1))) + + empty = W_ListObject(space, []) + assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(empty.strategy, IntegerListStrategy) @@ -397,6 +407,40 @@ l.append(self.space.wrap(19)) assert isinstance(l.strategy, IntegerListStrategy) + def test_simplerangelist(self): + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop(5) + assert self.space.eq_w(v, self.space.wrap(5)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop(0) + assert self.space.eq_w(v, self.space.wrap(0)) + # XXX promote to RangeListStrategy + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop_end() + assert self.space.eq_w(v, self.space.wrap(9)) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop_end() + assert self.space.eq_w(v, self.space.wrap(8)) + assert isinstance(l.strategy, SimpleRangeListStrategy) + + l = make_range_list(self.space, 0, 1, 5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + l.append(self.space.wrap("string")) + assert isinstance(l.strategy, ObjectListStrategy) + + l = make_range_list(self.space, 0,1,5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + l.append(self.space.wrap(19)) + assert isinstance(l.strategy, IntegerListStrategy) + + def test_keep_range(self): # simple list l = make_range_list(self.space, 1,1,5) From noreply at buildbot.pypy.org Tue Mar 4 18:52:26 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:26 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: test for fixed failure Message-ID: <20140304175226.6C3F31C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69678:0697804c87a8 Date: 2014-03-04 14:53 +0100 http://bitbucket.org/pypy/pypy/changeset/0697804c87a8/ Log: test for fixed failure diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1019,7 +1019,7 @@ raise NotImplementedError def clone(self, w_list): - storage = w_list.lstorage # lstorage is tuple/int, no need to clone + storage = w_list.lstorage # lstorage is tuple, no need to clone w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, self) return w_clone diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -467,6 +467,8 @@ l.sort(False) assert isinstance(l.strategy, SimpleRangeListStrategy) + assert self.space.eq_w(l.getitem(5), self.space.wrap(5)) + def test_keep_range(self): # simple list l = make_range_list(self.space, 1,1,5) From noreply at buildbot.pypy.org Tue Mar 4 18:52:27 2014 From: 
noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:27 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: fix test Message-ID: <20140304175227.9DAB51C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69679:97595f9d3e7f Date: 2014-03-04 17:26 +0100 http://bitbucket.org/pypy/pypy/changeset/97595f9d3e7f/ Log: fix test diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1154,10 +1154,13 @@ func_with_new_name(_getitems_range, "_getitems_range_unroll")) def pop_end(self, w_list): - length_m1 = self.unerase(w_list.lstorage)[0] - 1 - w_result = self.wrap(length_m1) - assert length_m1 > 0 - w_list.lstorage = self.erase((length_m1,)) + new_length = self.unerase(w_list.lstorage)[0] - 1 + w_result = self.wrap(new_length) + if new_length > 0: + w_list.lstorage = self.erase((new_length,)) + else: + strategy = w_list.strategy = self.space.fromcache(EmptyListStrategy) + w_list.lstorage = strategy.erase(None) return w_result def pop(self, w_list, index): From noreply at buildbot.pypy.org Tue Mar 4 18:52:28 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:28 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: want more test case Message-ID: <20140304175228.CD5A71C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69680:d0f90833cf28 Date: 2014-03-04 17:31 +0100 http://bitbucket.org/pypy/pypy/changeset/d0f90833cf28/ Log: want more test case diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -469,6 +469,10 @@ assert self.space.eq_w(l.getitem(5), self.space.wrap(5)) + l = make_range_list(self.space, 0, 1, 1) + assert self.space.eq_w(l.pop_end(), self.space.wrap(0)) + assert isinstance(l.strategy, EmptyListStrategy) + def test_keep_range(self): # simple list l = make_range_list(self.space, 1,1,5) From noreply at buildbot.pypy.org Tue Mar 4 18:52:29 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:29 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: fix range tests Message-ID: <20140304175229.F2BF61C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69681:499b5b290cd9 Date: 2014-03-04 18:09 +0100 http://bitbucket.org/pypy/pypy/changeset/499b5b290cd9/ Log: fix range tests diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -72,28 +72,28 @@ r.sort(key=lambda x: -x) assert r == range(9, -1, -1) def test_pop(self): - r = range(10) + r = range(1, 10) res = r.pop() assert res == 9 assert self.not_forced(r) - assert repr(r) == repr(range(9)) + assert repr(r) == repr(range(1, 9)) res = r.pop(0) - assert res == 0 + assert res == 1 assert self.not_forced(r) - assert repr(r) == repr(range(1, 9)) + assert repr(r) == repr(range(2, 9)) res = r.pop(len(r) - 1) assert res == 8 assert self.not_forced(r) - assert repr(r) == repr(range(1, 8)) - res = r.pop(2) - assert res == 3 - assert not self.not_forced(r) - assert r == [1, 2, 4, 5, 6, 7] + assert repr(r) == repr(range(2, 8)) res = r.pop(2) assert res == 4 assert not self.not_forced(r) - assert r == [1, 2, 5, 6, 7] - + assert r == [2, 3, 
5, 6, 7] + res = r.pop(2) + assert res == 5 + assert not self.not_forced(r) + assert r == [2, 3, 6, 7] + def test_reduce(self): it = iter(range(10)) assert it.next() == 0 From noreply at buildbot.pypy.org Tue Mar 4 18:52:31 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:31 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: update test_pop in test_rangeobject Message-ID: <20140304175231.21F691C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69682:0e7a793d817a Date: 2014-03-04 18:14 +0100 http://bitbucket.org/pypy/pypy/changeset/0e7a793d817a/ Log: update test_pop in test_rangeobject diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -72,6 +72,7 @@ r.sort(key=lambda x: -x) assert r == range(9, -1, -1) def test_pop(self): + # RangeListStrategy r = range(1, 10) res = r.pop() assert res == 9 @@ -94,6 +95,19 @@ assert not self.not_forced(r) assert r == [2, 3, 6, 7] + # SimpleRangeListStrategy + r = range(10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + res = r.pop() + assert res == 8 + assert self.not_forced(r) + res = r.pop(0) + assert res == 0 + assert not self.not_forced(r) + assert r == [1, 2, 3, 4, 5, 6, 7] + def test_reduce(self): it = iter(range(10)) assert it.next() == 0 From noreply at buildbot.pypy.org Tue Mar 4 18:52:32 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:32 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: and this Message-ID: <20140304175232.4C9451C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69683:eb5fef9132bf Date: 2014-03-04 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/eb5fef9132bf/ Log: and this diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -102,6 +102,7 @@ assert self.not_forced(r) res = r.pop() assert res == 8 + assert repr(r) == repr(range(8)) assert self.not_forced(r) res = r.pop(0) assert res == 0 From noreply at buildbot.pypy.org Tue Mar 4 18:52:33 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:33 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: document branch Message-ID: <20140304175233.8072A1C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69684:23c813f8c613 Date: 2014-03-04 18:45 +0100 http://bitbucket.org/pypy/pypy/changeset/23c813f8c613/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -94,3 +94,8 @@ .. branch: test-58c3d8552833 Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. From noreply at buildbot.pypy.org Tue Mar 4 18:52:34 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:34 +0100 (CET) Subject: [pypy-commit] pypy default: Implements SimpleRangeListStrategy for case range(n) where n is a positive number. 
Message-ID: <20140304175234.B57BF1C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69685:4d70ac4b69b1 Date: 2014-03-04 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/4d70ac4b69b1/ Log: Implements SimpleRangeListStrategy for case range(n) where n is a positive number. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -94,3 +94,8 @@ .. branch: test-58c3d8552833 Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -43,6 +43,10 @@ if length <= 0: strategy = space.fromcache(EmptyListStrategy) storage = strategy.erase(None) + elif start == 0 and step == 1: + strategy = space.fromcache(SimpleRangeListStrategy) + assert length > 0 + storage = strategy.erase((length,)) else: strategy = space.fromcache(RangeListStrategy) storage = strategy.erase((start, step, length)) @@ -999,15 +1003,7 @@ self.sizehint = hint -class RangeListStrategy(ListStrategy): - """RangeListStrategy is used when a list is created using the range method. - The storage is a tuple containing only three integers start, step and - length and elements are calculated based on these values. On any operation - destroying the range (inserting, appending non-ints) the strategy is - switched to IntegerListStrategy.""" - - _applevel_repr = "range" - +class BaseRangeListStrategy(ListStrategy): def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -1022,10 +1018,6 @@ def init_from_list_w(self, w_list, list_w): raise NotImplementedError - erase, unerase = rerased.new_erasing_pair("range") - erase = staticmethod(erase) - unerase = staticmethod(unerase) - def clone(self, w_list): storage = w_list.lstorage # lstorage is tuple, no need to clone w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, @@ -1040,6 +1032,155 @@ w_other.strategy = self w_other.lstorage = w_list.lstorage + def getitem(self, w_list, i): + return self.wrap(self._getitem_unwrapped(w_list, i)) + + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + + def getitems_copy(self, w_list): + return self._getitems_range(w_list, True) + + def getstorage_copy(self, w_list): + # tuple is immutable + return w_list.lstorage + + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self._getitems_range_unroll(w_list, True) + + def getitems_unroll(self, w_list): + return self._getitems_range_unroll(w_list, True) + + def getslice(self, w_list, start, stop, step, length): + self.switch_to_integer_strategy(w_list) + return w_list.getslice(start, stop, step, length) + + def append(self, w_list, w_item): + if type(w_item) is W_IntObject: + self.switch_to_integer_strategy(w_list) + else: + w_list.switch_to_object_strategy() + w_list.append(w_item) + + def inplace_mul(self, w_list, times): + self.switch_to_integer_strategy(w_list) + w_list.inplace_mul(times) + + def deleteslice(self, w_list, start, step, slicelength): + self.switch_to_integer_strategy(w_list) + w_list.deleteslice(start, step, slicelength) + + def 
setitem(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.setitem(index, w_item) + + def setslice(self, w_list, start, step, slicelength, sequence_w): + self.switch_to_integer_strategy(w_list) + w_list.setslice(start, step, slicelength, sequence_w) + + def insert(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.insert(index, w_item) + + def extend(self, w_list, w_any): + self.switch_to_integer_strategy(w_list) + w_list.extend(w_any) + + def reverse(self, w_list): + self.switch_to_integer_strategy(w_list) + w_list.reverse() + + def sort(self, w_list, reverse): + step = self.step(w_list) + if step > 0 and reverse or step < 0 and not reverse: + self.switch_to_integer_strategy(w_list) + w_list.sort(reverse) + + +class SimpleRangeListStrategy(BaseRangeListStrategy): + """SimpleRangeListStrategy is used when a list is created using the range + method providing only positive length. The storage is a one element tuple + with positive integer storing length.""" + + _applevel_repr = "simple_range" + + erase, unerase = rerased.new_erasing_pair("simple_range") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def find(self, w_list, w_obj, startindex, stopindex): + if type(w_obj) is W_IntObject: + obj = self.unwrap(w_obj) + length = self.unerase(w_list.lstorage)[0] + if 0 <= obj < length and startindex <= obj < stopindex: + return obj + else: + raise ValueError + return ListStrategy.find(self, w_list, w_obj, startindex, stopindex) + + def length(self, w_list): + return self.unerase(w_list.lstorage)[0] + + def step(self, w_list): + return 1 + + def _getitem_unwrapped(self, w_list, i): + length = self.unerase(w_list.lstorage)[0] + assert length > 0 + if 0 <= i < length: + return i + else: + raise IndexError + + @specialize.arg(2) + def _getitems_range(self, w_list, wrap_items): + length = self.unerase(w_list.lstorage)[0] + if wrap_items: + r = [None] * length + else: + r = [0] * length + i = 0 + while i < length: + if wrap_items: + r[i] = self.wrap(i) + else: + r[i] = i + i += 1 + + return r + + _getitems_range_unroll = jit.unroll_safe( + func_with_new_name(_getitems_range, "_getitems_range_unroll")) + + def pop_end(self, w_list): + new_length = self.unerase(w_list.lstorage)[0] - 1 + w_result = self.wrap(new_length) + if new_length > 0: + w_list.lstorage = self.erase((new_length,)) + else: + strategy = w_list.strategy = self.space.fromcache(EmptyListStrategy) + w_list.lstorage = strategy.erase(None) + return w_result + + def pop(self, w_list, index): + self.switch_to_integer_strategy(w_list) + return w_list.pop(index) + + +class RangeListStrategy(BaseRangeListStrategy): + """RangeListStrategy is used when a list is created using the range method. + The storage is a tuple containing only three integers start, step and + length and elements are calculated based on these values. 
On any operation + destroying the range (inserting, appending non-ints) the strategy is + switched to IntegerListStrategy.""" + + _applevel_repr = "range" + + erase, unerase = rerased.new_erasing_pair("range") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + def find(self, w_list, w_obj, startindex, stopindex): if type(w_obj) is W_IntObject: obj = self.unwrap(w_obj) @@ -1059,6 +1200,9 @@ def length(self, w_list): return self.unerase(w_list.lstorage)[2] + def step(self, w_list): + return self.unerase(w_list.lstorage)[1] + def _getitem_unwrapped(self, w_list, i): v = self.unerase(w_list.lstorage) start = v[0] @@ -1072,19 +1216,6 @@ raise IndexError return start + i * step - def getitems_int(self, w_list): - return self._getitems_range(w_list, False) - - def getitem(self, w_list, i): - return self.wrap(self._getitem_unwrapped(w_list, i)) - - def getitems_copy(self, w_list): - return self._getitems_range(w_list, True) - - def getstorage_copy(self, w_list): - # tuple is unmutable - return w_list.lstorage - @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.unerase(w_list.lstorage) @@ -1107,34 +1238,9 @@ return r - @jit.dont_look_inside - def getitems_fixedsize(self, w_list): - return self._getitems_range_unroll(w_list, True) - - def getitems_unroll(self, w_list): - return self._getitems_range_unroll(w_list, True) _getitems_range_unroll = jit.unroll_safe( func_with_new_name(_getitems_range, "_getitems_range_unroll")) - def getslice(self, w_list, start, stop, step, length): - self.switch_to_integer_strategy(w_list) - return w_list.getslice(start, stop, step, length) - - def append(self, w_list, w_item): - if type(w_item) is W_IntObject: - self.switch_to_integer_strategy(w_list) - else: - w_list.switch_to_object_strategy() - w_list.append(w_item) - - def inplace_mul(self, w_list, times): - self.switch_to_integer_strategy(w_list) - w_list.inplace_mul(times) - - def deleteslice(self, w_list, start, step, slicelength): - self.switch_to_integer_strategy(w_list) - w_list.deleteslice(start, step, slicelength) - def pop_end(self, w_list): start, step, length = self.unerase(w_list.lstorage) w_result = self.wrap(start + (length - 1) * step) @@ -1158,32 +1264,6 @@ self.switch_to_integer_strategy(w_list) return w_list.pop(index) - def setitem(self, w_list, index, w_item): - self.switch_to_integer_strategy(w_list) - w_list.setitem(index, w_item) - - def setslice(self, w_list, start, step, slicelength, sequence_w): - self.switch_to_integer_strategy(w_list) - w_list.setslice(start, step, slicelength, sequence_w) - - def sort(self, w_list, reverse): - step = self.unerase(w_list.lstorage)[1] - if step > 0 and reverse or step < 0 and not reverse: - self.switch_to_integer_strategy(w_list) - w_list.sort(reverse) - - def insert(self, w_list, index, w_item): - self.switch_to_integer_strategy(w_list) - w_list.insert(index, w_item) - - def extend(self, w_list, w_any): - self.switch_to_integer_strategy(w_list) - w_list.extend(w_any) - - def reverse(self, w_list): - self.switch_to_integer_strategy(w_list) - w_list.reverse() - class AbstractUnwrappedStrategy(object): @@ -1541,7 +1621,7 @@ _base_extend_from_list = _extend_from_list def _extend_from_list(self, w_list, w_other): - if w_other.strategy is self.space.fromcache(RangeListStrategy): + if isinstance(w_other.strategy, BaseRangeListStrategy): l = self.unerase(w_list.lstorage) other = w_other.getitems_int() assert other is not None diff --git a/pypy/objspace/std/test/test_liststrategies.py 
b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,8 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import ( + W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, + FloatListStrategy, BytesListStrategy, RangeListStrategy, + SimpleRangeListStrategy, make_range_list, UnicodeListStrategy) from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -18,7 +21,7 @@ UnicodeListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, ObjectListStrategy) # mixed unicode and bytes - + def test_empty_to_any(self): space = self.space w = space.wrap @@ -183,7 +186,7 @@ def test_setslice(self): space = self.space w = space.wrap - + l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.setslice(0, 1, 2, W_ListObject(space, [w(1), w(2), w(3)])) @@ -286,7 +289,7 @@ def test_empty_setslice_with_objectlist(self): space = self.space w = space.wrap - + l = W_ListObject(space, []) o = W_ListObject(space, [space.wrap(1), space.wrap("2"), space.wrap(3)]) l.setslice(0, 1, o.length(), o) @@ -347,6 +350,13 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) + r = make_range_list(space, 0, 1, 10) + empty.extend(r) + assert isinstance(empty.strategy, SimpleRangeListStrategy) + assert space.is_true(space.eq(empty.getitem(1), w(1))) + + empty = W_ListObject(space, []) + assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(empty.strategy, IntegerListStrategy) @@ -397,6 +407,72 @@ l.append(self.space.wrap(19)) assert isinstance(l.strategy, IntegerListStrategy) + def test_simplerangelist(self): + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop(5) + assert self.space.eq_w(v, self.space.wrap(5)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop(0) + assert self.space.eq_w(v, self.space.wrap(0)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop_end() + assert self.space.eq_w(v, self.space.wrap(9)) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop_end() + assert self.space.eq_w(v, self.space.wrap(8)) + assert isinstance(l.strategy, SimpleRangeListStrategy) + + l = make_range_list(self.space, 0, 1, 5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + l.append(self.space.wrap("string")) + assert isinstance(l.strategy, ObjectListStrategy) + + l = make_range_list(self.space, 0,1,5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + l.append(self.space.wrap(19)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0,1,5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + assert l.find(self.space.wrap(0)) == 0 + assert l.find(self.space.wrap(4)) == 4 + + try: + l.find(self.space.wrap(5)) + except ValueError: + pass + else: + assert False, "Did not raise ValueError" + + try: + l.find(self.space.wrap(0), 5, 6) + except 
ValueError: + pass + else: + assert False, "Did not raise ValueError" + + assert l.length() == 5 + + l = make_range_list(self.space, 0, 1, 1) + assert self.space.eq_w(l.pop(0), self.space.wrap(0)) + + l = make_range_list(self.space, 0, 1, 10) + l.sort(False) + assert isinstance(l.strategy, SimpleRangeListStrategy) + + assert self.space.eq_w(l.getitem(5), self.space.wrap(5)) + + l = make_range_list(self.space, 0, 1, 1) + assert self.space.eq_w(l.pop_end(), self.space.wrap(0)) + assert isinstance(l.strategy, EmptyListStrategy) + def test_keep_range(self): # simple list l = make_range_list(self.space, 1,1,5) diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -72,28 +72,43 @@ r.sort(key=lambda x: -x) assert r == range(9, -1, -1) def test_pop(self): + # RangeListStrategy + r = range(1, 10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + assert repr(r) == repr(range(1, 9)) + res = r.pop(0) + assert res == 1 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 9)) + res = r.pop(len(r) - 1) + assert res == 8 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 8)) + res = r.pop(2) + assert res == 4 + assert not self.not_forced(r) + assert r == [2, 3, 5, 6, 7] + res = r.pop(2) + assert res == 5 + assert not self.not_forced(r) + assert r == [2, 3, 6, 7] + + # SimpleRangeListStrategy r = range(10) res = r.pop() assert res == 9 assert self.not_forced(r) - assert repr(r) == repr(range(9)) + res = r.pop() + assert res == 8 + assert repr(r) == repr(range(8)) + assert self.not_forced(r) res = r.pop(0) assert res == 0 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 9)) - res = r.pop(len(r) - 1) - assert res == 8 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 8)) - res = r.pop(2) - assert res == 3 assert not self.not_forced(r) - assert r == [1, 2, 4, 5, 6, 7] - res = r.pop(2) - assert res == 4 - assert not self.not_forced(r) - assert r == [1, 2, 5, 6, 7] - + assert r == [1, 2, 3, 4, 5, 6, 7] + def test_reduce(self): it = iter(range(10)) assert it.next() == 0 From noreply at buildbot.pypy.org Tue Mar 4 18:52:35 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Tue, 4 Mar 2014 18:52:35 +0100 (CET) Subject: [pypy-commit] pypy simple-range-strategy: Close merged branch Message-ID: <20140304175235.E2C6A1C042F@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: simple-range-strategy Changeset: r69686:c20fe0db3889 Date: 2014-03-04 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/c20fe0db3889/ Log: Close merged branch From noreply at buildbot.pypy.org Tue Mar 4 19:06:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 19:06:27 +0100 (CET) Subject: [pypy-commit] stmgc default: Bug fix Message-ID: <20140304180627.611EB1C042F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r948:04836bf75796 Date: 2014-03-04 18:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/04836bf75796/ Log: Bug fix diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -393,10 +393,18 @@ assert(pseg->transaction_state != TS_NONE); assert(pseg->safe_point != SP_RUNNING); + assert(pseg->safe_point != SP_NO_TRANSACTION); set_gs_register(get_segment_base(i)); - _do_minor_collection(/*commit=*/ false); - assert(MINOR_NOTHING_TO_DO(pseg)); + + /* Other segments that will abort immediately after resuming: we + have to ignore them, not try to collect 
them anyway! + Collecting might fail due to invalid state. + */ + if (!must_abort()) { + _do_minor_collection(/*commit=*/ false); + assert(MINOR_NOTHING_TO_DO(pseg)); + } } set_gs_register(get_segment_base(original_num)); From noreply at buildbot.pypy.org Tue Mar 4 19:06:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 19:06:28 +0100 (CET) Subject: [pypy-commit] stmgc default: It seems to get much more likely to crash with a lower value here Message-ID: <20140304180628.858161C042F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r949:dacc33d175bf Date: 2014-03-04 19:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/dacc33d175bf/ Log: It seems to get much more likely to crash with a lower value here diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -8,7 +8,7 @@ #include "stmgc.h" #define NUMTHREADS 3 -#define STEPS_PER_THREAD 5000 +#define STEPS_PER_THREAD 500 #define THREAD_STARTS 100 // how many restarts of threads #define SHARED_ROOTS 3 #define MAXROOTS 1000 From noreply at buildbot.pypy.org Tue Mar 4 19:43:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 19:43:35 +0100 (CET) Subject: [pypy-commit] stmgc default: Goes together with dacc33d175bf to make the crash more likely: a lower nursery size. Message-ID: <20140304184335.9E92C1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r950:72ebf183765d Date: 2014-03-04 19:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/72ebf183765d/ Log: Goes together with dacc33d175bf to make the crash more likely: a lower nursery size. diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -20,12 +20,12 @@ # note that 'build' is optimized but still contains all asserts debug-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -DSTM_DEBUGPRINT -g -O0 $< -o debug-$* \ - -Wall -Werror ../stmgc.c + clang -I.. -pthread -DSTM_DEBUGPRINT -DSTM_GC_NURSERY=128 -g -O0 \ + $< -o debug-$* -Wall -Werror ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -g -O0 $< -o build-$* \ - -Wall -Werror ../stmgc.c + clang -I.. -pthread -DSTM_GC_NURSERY=128 -g -O0 \ + $< -o build-$* -Wall -Werror ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} clang -I.. 
-pthread -g -DNDEBUG -O2 $< -o release-$* \ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -9,12 +9,16 @@ /************************************************************/ +#ifndef STM_GC_NURSERY +# define STM_GC_NURSERY 4096 // 4MB +#endif + #define NB_PAGES (1500*256) // 1500MB #define NB_SEGMENTS 2 #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define NB_NURSERY_PAGES 1024 // 4MB +#define NB_NURSERY_PAGES (STM_GC_NURSERY/4) #define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -18,6 +18,7 @@ assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); assert((END_NURSERY_PAGE * 4096UL) >> 8 <= (FIRST_READMARKER_PAGE * 4096UL)); + assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); stm_object_pages = mmap(NULL, TOTAL_MEMORY, PROT_READ | PROT_WRITE, From noreply at buildbot.pypy.org Tue Mar 4 19:43:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 19:43:36 +0100 (CET) Subject: [pypy-commit] stmgc default: Still debugging, I think it's safer to set this to 0 for now Message-ID: <20140304184336.C68DE1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r951:f08576723910 Date: 2014-03-04 19:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/f08576723910/ Log: Still debugging, I think it's safer to set this to 0 for now diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -15,7 +15,7 @@ #define GC_MAJOR_COLLECT 1.82 /* re-share pages after major collections (1 or 0) */ -#define RESHARE_PAGES 1 +#define RESHARE_PAGES 0 From noreply at buildbot.pypy.org Tue Mar 4 19:43:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 19:43:37 +0100 (CET) Subject: [pypy-commit] stmgc default: A final clean-up round of bug fixes. Now demo_random seems to pass Message-ID: <20140304184337.DD8AC1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r952:37981d65bc52 Date: 2014-03-04 19:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/37981d65bc52/ Log: A final clean-up round of bug fixes. Now demo_random seems to pass again. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -215,8 +215,8 @@ We don't have to wake it up right now, but we know it will abort as soon as it wakes up. We can safely force it to reset its state now. */ - dprintf(("reset other modified\n")); - reset_modified_from_other_segments(other_segment_num); + dprintf(("killing data structures\n")); + abort_data_structures_from_segment_num(other_segment_num); } dprintf(("killed other thread\n")); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -448,6 +448,29 @@ list_clear(pseg->modified_old_objects); } +static void abort_data_structures_from_segment_num(int segment_num) +{ + /* This function clears the content of the given segment undergoing + an abort. It is called from abort_with_mutex(), but also sometimes + from other threads that figure out that this segment should abort. + In the latter case, make sure that this segment is currently at + a safe point (not SP_RUNNING). 
+ */ + struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); + + /* throw away the content of the nursery */ + throw_away_nursery(pseg); + + /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ + reset_modified_from_other_segments(segment_num); + + /* reset the tl->shadowstack and thread_local_obj to their original + value before the transaction start */ + stm_thread_local_t *tl = pseg->pub.running_thread; + tl->shadowstack = pseg->shadowstack_at_start_of_transaction; + tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; +} + static void abort_with_mutex(void) { dprintf(("~~~ ABORT\n")); @@ -463,16 +486,9 @@ } assert(STM_PSEGMENT->running_pthread == pthread_self()); - /* throw away the content of the nursery */ - throw_away_nursery(); - - /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ - reset_modified_from_other_segments(STM_SEGMENT->segment_num); + abort_data_structures_from_segment_num(STM_SEGMENT->segment_num); stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; - tl->thread_local_obj = STM_PSEGMENT->threadlocal_at_start_of_transaction; if (STM_SEGMENT->nursery_end == NSE_SIGABORT) STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -183,6 +183,7 @@ static void teardown_core(void); static void abort_with_mutex(void) __attribute__((noreturn)); +static void abort_data_structures_from_segment_num(int segment_num); static inline bool was_read_remote(char *base, object_t *obj, uint8_t other_transaction_read_version) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -196,29 +196,29 @@ _collect_now(item)); } -static void throw_away_nursery(void) +static void throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ size_t size; char *realnursery; - realnursery = REAL_ADDRESS(STM_SEGMENT->segment_base, _stm_nursery_start); - size = STM_SEGMENT->nursery_current - (stm_char *)_stm_nursery_start; + realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); + size = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; memset(realnursery, 0, size); - STM_SEGMENT->nursery_current = (stm_char *)_stm_nursery_start; + pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; /* free any object left from 'young_outside_nursery' */ - if (!tree_is_cleared(STM_PSEGMENT->young_outside_nursery)) { + if (!tree_is_cleared(pseg->young_outside_nursery)) { bool locked = false; wlog_t *item; - TREE_LOOP_FORWARD(*STM_PSEGMENT->young_outside_nursery, item) { + TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); if (!locked) { mutex_pages_lock(); locked = true; } - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base,item->addr); + char *realobj = REAL_ADDRESS(pseg->pub.segment_base, item->addr); ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free(stm_object_pages + item->addr); @@ -227,7 +227,7 @@ if (locked) mutex_pages_unlock(); - tree_clear(STM_PSEGMENT->young_outside_nursery); + tree_clear(pseg->young_outside_nursery); } } @@ -277,7 +277,7 @@ collect_oldrefs_to_nursery(); - throw_away_nursery(); + throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num)); 
assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); @@ -405,6 +405,10 @@ _do_minor_collection(/*commit=*/ false); assert(MINOR_NOTHING_TO_DO(pseg)); } + else { + dprintf(("abort data structures\n")); + abort_data_structures_from_segment_num(i); + } } set_gs_register(get_segment_base(original_num)); diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -11,7 +11,7 @@ static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); -static void throw_away_nursery(void); +static void throw_away_nursery(struct stm_priv_segment_info_s *pseg); static void major_do_minor_collections(void); static inline bool must_abort(void) { From noreply at buildbot.pypy.org Tue Mar 4 19:51:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Mar 2014 19:51:42 +0100 (CET) Subject: [pypy-commit] stmgc default: Extra comment Message-ID: <20140304185142.4818D1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r953:79112a4f530d Date: 2014-03-04 19:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/79112a4f530d/ Log: Extra comment diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -454,7 +454,9 @@ an abort. It is called from abort_with_mutex(), but also sometimes from other threads that figure out that this segment should abort. In the latter case, make sure that this segment is currently at - a safe point (not SP_RUNNING). + a safe point (not SP_RUNNING). Note that in such cases this + function is called more than once for the same segment, but it + should not matter. */ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); From noreply at buildbot.pypy.org Tue Mar 4 21:16:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 21:16:42 +0100 (CET) Subject: [pypy-commit] pypy default: replace a couple usages of streamio Message-ID: <20140304201642.5806F1C02AE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69687:d1a51abf3b33 Date: 2014-03-04 14:53 -0500 http://bitbucket.org/pypy/pypy/changeset/d1a51abf3b33/ Log: replace a couple usages of streamio diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py --- a/pypy/module/_pypyjson/targetjson.py +++ b/pypy/module/_pypyjson/targetjson.py @@ -4,12 +4,10 @@ sys.path.insert(0, str(ROOT)) import time -from rpython.rlib.streamio import open_file_as_stream from pypy.interpreter.error import OperationError from pypy.module._pypyjson.interp_decoder import loads - ## MSG = open('msg.json').read() class W_Root(object): @@ -92,7 +90,7 @@ def wrapfloat(self, x): return W_Float(x) - + def wrap(self, x): if isinstance(x, int): return W_Int(x) @@ -109,7 +107,6 @@ def myloads(msg): return loads(fakespace, W_String(msg)) - def bench(title, N, fn, arg): a = time.clock() @@ -124,14 +121,14 @@ return 1 filename = argv[1] N = int(argv[2]) - f = open_file_as_stream(filename) - msg = f.readall() - + f = open(filename) + msg = f.read() + try: bench('loads ', N, myloads, msg) except OperationError, e: print 'Error', e._compute_value(fakespace) - + return 0 # _____ Define and setup target ___ diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError from rpython.rlib import rgc -from rpython.rlib.streamio import 
open_file_as_stream + @unwrap_spec(generation=int) def collect(space, generation=0): @@ -56,7 +56,7 @@ if not tb: raise OperationError(space.w_RuntimeError, space.wrap("Wrong GC")) - f = open_file_as_stream(filename, mode="w") + f = open(filename, mode="w") for i in range(len(tb)): f.write("%d %d " % (tb[i].count, tb[i].size)) f.write(",".join([str(tb[i].links[j]) for j in range(len(tb))]) + "\n") diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -1,5 +1,6 @@ import py + class AppTestGC(object): def test_collect(self): import gc @@ -69,6 +70,7 @@ gc.enable() assert gc.isenabled() + class AppTestGcDumpHeap(object): pytestmark = py.test.mark.xfail(run=False) @@ -81,10 +83,10 @@ self.count = count self.size = size self.links = links - + def fake_heap_stats(): return [X(1, 12, [0, 0]), X(2, 10, [10, 0])] - + cls._heap_stats = rgc._heap_stats rgc._heap_stats = fake_heap_stats fname = udir.join('gcdump.log') @@ -94,10 +96,10 @@ def teardown_class(cls): import py from rpython.rlib import rgc - + rgc._heap_stats = cls._heap_stats assert py.path.local(cls._fname).read() == '1 12 0,0\n2 10 10,0\n' - + def test_gc_heap_stats(self): import gc gc.dump_heap_stats(self.fname) @@ -124,6 +126,7 @@ for r in rlist: assert r() is None + class AppTestGcMapDictIndexCache(AppTestGcMethodCache): spaceconfig = {"objspace.std.withmethodcache": True, "objspace.std.withmapdict": True} From noreply at buildbot.pypy.org Tue Mar 4 21:35:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 4 Mar 2014 21:35:50 +0100 (CET) Subject: [pypy-commit] pypy default: Remove unnecessary 'from __future__ import with_statement'. Message-ID: <20140304203550.7CE601C35E0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r69688:18dc8356b0d9 Date: 2014-03-04 21:35 +0100 http://bitbucket.org/pypy/pypy/changeset/18dc8356b0d9/ Log: Remove unnecessary 'from __future__ import with_statement'. diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -8,8 +8,6 @@ (gdb) python execfile('/path/to/gdb_pypy.py') """ -from __future__ import with_statement - import re import sys import os.path From noreply at buildbot.pypy.org Tue Mar 4 21:38:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 21:38:17 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: cleanups for socket module Message-ID: <20140304203817.DEAA31D2515@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69689:f6af5202851c Date: 2014-03-04 06:42 -0500 http://bitbucket.org/pypy/pypy/changeset/f6af5202851c/ Log: cleanups for socket module diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -10,13 +10,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter import gateway -class SignalChecker: - def __init__(self, space): - self.space = space - - def check(self): - self.space.getexecutioncontext().checksignals() - # XXX Hack to seperate rpython and pypy def addr_as_object(addr, fd, space): @@ -197,7 +190,7 @@ def connect_ex_w(self, space, w_addr): """connect_ex(address) -> errno - + This is like connect(address), but returns an error code (the errno value) instead of raising an exception when an error occurs. 
""" @@ -213,7 +206,7 @@ return self.dup(W_RSocket) except SocketError, e: raise converted_error(space, e) - + def fileno_w(self, space): """fileno() -> integer @@ -350,7 +343,7 @@ to tell how much data has been sent. """ try: - count = self.sendall(data, flags, SignalChecker(space)) + self.sendall(data, flags, space.getexecutioncontext().checksignals) except SocketError, e: raise converted_error(space, e) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -533,10 +533,10 @@ s.connect(("www.python.org", 80)) except _socket.gaierror, ex: skip("GAIError - probably no connection: %s" % str(ex.args)) - s.send(buffer('')) - s.sendall(buffer('')) - s.send(u'') - s.sendall(u'') + assert s.send(buffer('')) == 0 + assert s.sendall(buffer('')) is None + assert s.send(u'') == 0 + assert s.sendall(u'') is None raises(UnicodeEncodeError, s.send, u'\xe9') s.close() s = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM, 0) @@ -579,7 +579,7 @@ cls.space = space HOST = 'localhost' - + def setup_method(self, method): w_HOST = space.wrap(self.HOST) self.w_serv = space.appexec([w_socket, w_HOST], @@ -625,8 +625,8 @@ buf = t.recv(1) assert buf == '!' # test that sendall() works - cli.sendall('?') - assert count == 1 + count = cli.sendall('?') + assert count is None buf = t.recv(1) assert buf == '?' # test send() timeout @@ -636,7 +636,7 @@ count += cli.send('foobar' * 70) except timeout: pass - t.recv(count) + t.recv(count) # test sendall() timeout try: while 1: diff --git a/pypy/module/_socket/test/test_ztranslation.py b/pypy/module/_socket/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_socket/test/test_ztranslation.py @@ -0,0 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule + + +def test_checkmodule(): + checkmodule('_socket') diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -898,8 +898,8 @@ except CSocketError, e: if e.errno != _c.EINTR: raise - if signal_checker: - signal_checker.check() + if signal_checker is not None: + signal_checker() finally: rffi.free_nonmovingbuffer(data, dataptr) From noreply at buildbot.pypy.org Tue Mar 4 22:58:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 22:58:54 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: cleanup Message-ID: <20140304215854.5ACA01C3619@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69690:f8175a0093e2 Date: 2014-03-04 16:22 -0500 http://bitbucket.org/pypy/pypy/changeset/f8175a0093e2/ Log: cleanup diff --git a/rpython/rlib/rsre/rpy/_sre.py b/rpython/rlib/rsre/rpy/_sre.py --- a/rpython/rlib/rsre/rpy/_sre.py +++ b/rpython/rlib/rsre/rpy/_sre.py @@ -1,4 +1,3 @@ -import sys from rpython.rlib.rsre import rsre_char from rpython.rlib.rarithmetic import intmask diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py --- a/rpython/rlib/rsre/rsre_char.py +++ b/rpython/rlib/rsre/rsre_char.py @@ -5,7 +5,7 @@ from rpython.rlib.rlocale import tolower, isalnum from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit -from rpython.rlib.rarithmetic import int_between, intmask +from rpython.rlib.rarithmetic import int_between # Note: the unicode parts of this module require you to call # rsre_char.set_unicode_db() first, to select one of the modules @@ -27,9 +27,9 @@ MAGIC = 20031017 if sys.maxint 
> 2**32: - MAXREPEAT = intmask(2**32 - 1) + MAXREPEAT = int(2**32 - 1) else: - MAXREPEAT = intmask(2**31 - 1) + MAXREPEAT = int(2**31 - 1) # In _sre.c this is bytesize of the code word type of the C implementation. # There it's 2 for normal Python builds and more for wide unicode builds (large From noreply at buildbot.pypy.org Tue Mar 4 22:58:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 22:58:55 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: move this test to test_textio.py so it has _locale module Message-ID: <20140304215855.BD4CA1C3619@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69691:75626e530f39 Date: 2014-03-04 16:57 -0500 http://bitbucket.org/pypy/pypy/changeset/75626e530f39/ Log: move this test to test_textio.py so it has _locale module needed for locale.getpreferredencoding() on posix if LANG env vars are not set diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -99,13 +99,6 @@ f.close() f2.close() - def test_writelines_error(self): - import _io - txt = _io.TextIOWrapper(_io.BytesIO()) - raises(TypeError, txt.writelines, [1, 2, 3]) - raises(TypeError, txt.writelines, None) - raises(TypeError, txt.writelines, b'abc') - def test_seek(self): import _io f = _io.FileIO(self.tmpfile, 'rb') diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -111,6 +111,13 @@ assert f.read() == data * 2 assert buf.getvalue() == (data * 2).encode(encoding) + def test_writelines_error(self): + import _io + txt = _io.TextIOWrapper(_io.BytesIO()) + raises(TypeError, txt.writelines, [1, 2, 3]) + raises(TypeError, txt.writelines, None) + raises(TypeError, txt.writelines, b'abc') + def test_tell(self): import _io r = _io.BytesIO("abc\ndef\n") From noreply at buildbot.pypy.org Tue Mar 4 23:12:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 4 Mar 2014 23:12:29 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix charmap decode running on narrow python Message-ID: <20140304221229.B712F1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69692:0441dc0230a3 Date: 2014-03-04 22:11 +0000 http://bitbucket.org/pypy/pypy/changeset/0441dc0230a3/ Log: fix charmap decode running on narrow python diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -1,6 +1,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rstring import UnicodeBuilder +from rpython.rlib.runicode import UNICHR from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault @@ -534,7 +535,7 @@ if not 0 <= x <= 0x10FFFF: raise oefmt(space.w_TypeError, "character mapping must be in range(0x110000)") - return unichr(x) + return UNICHR(x) elif space.is_w(w_ch, space.w_None): # Charmap may return None return errorchar From noreply at buildbot.pypy.org Wed Mar 5 00:06:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 00:06:04 +0100 (CET) Subject: [pypy-commit] pypy default: have gdb_pypy.load_typeids used a named temporary file rather than /tmp/typeids.txt.z Message-ID: 
<20140304230604.920C91C0483@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69693:59650f378e7c Date: 2014-03-04 18:04 -0500 http://bitbucket.org/pypy/pypy/changeset/59650f378e7c/ Log: have gdb_pypy.load_typeids used a named temporary file rather than /tmp/typeids.txt.z diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -118,15 +118,16 @@ """ Returns a mapping offset --> description """ + import tempfile + import zlib vname = 'pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_typeids_z' length = int(self.gdb.parse_and_eval('*(long*)%s' % vname)) vstart = '(char*)(((long*)%s)+1)' % vname - self.gdb.execute('dump binary memory /tmp/typeids.txt.z %s %s+%d' - % (vstart, vstart, length)) - s = open('/tmp/typeids.txt.z', 'rb').read() - import zlib; typeids_txt = zlib.decompress(s) - typeids = TypeIdsMap(typeids_txt.splitlines(True), self.gdb) - return typeids + with tempfile.NamedTemporaryFile('rb') as fobj: + self.gdb.execute('dump binary memory %s %s %s+%d' % + (fobj.name, vstart, vstart, length)) + data = fobj.read() + return TypeIdsMap(zlib.decompress(data).splitlines(True), self.gdb) class TypeIdsMap(object): From noreply at buildbot.pypy.org Wed Mar 5 01:02:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 01:02:09 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_list_strategy Message-ID: <20140305000209.75C0A1C10A8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69694:a49d087de008 Date: 2014-03-04 19:01 -0500 http://bitbucket.org/pypy/pypy/changeset/a49d087de008/ Log: fix test_list_strategy diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -57,6 +57,8 @@ l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) + assert list_strategy(l) == "simple_range" + l = range(1, 2) assert list_strategy(l) == "range" l = [1, "b", 3] assert list_strategy(l) == "object" From noreply at buildbot.pypy.org Wed Mar 5 01:50:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 01:50:22 +0100 (CET) Subject: [pypy-commit] pypy default: fix getitem with negative indices on the new SimpleRangeList Message-ID: <20140305005022.B40001C315E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69695:69b0efd529c2 Date: 2014-03-04 19:49 -0500 http://bitbucket.org/pypy/pypy/changeset/69b0efd529c2/ Log: fix getitem with negative indices on the new SimpleRangeList diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -142,7 +142,6 @@ class W_ListObject(W_Root): - strategy = None def __init__(self, space, wrappeditems, sizehint=-1): @@ -1128,10 +1127,13 @@ def _getitem_unwrapped(self, w_list, i): length = self.unerase(w_list.lstorage)[0] assert length > 0 - if 0 <= i < length: - return i - else: + if i < 0: + i += length + if i < 0: + raise IndexError + elif i >= length: raise IndexError + return i @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -166,7 +166,6 @@ self.space.setitem(w_lhslist, w_slice, w_rhslist) assert self.space.unwrap(w_lhslist) == expected - 
test1([5,7,1,4], 1, 3, [9,8], [5,9,8,4]) test1([5,7,1,4], 1, 3, [9], [5,9,4]) test1([5,7,1,4], 1, 3, [9,8,6],[5,9,8,6,4]) @@ -294,6 +293,7 @@ self.space.w_True) assert self.space.eq_w(self.space.eq(w_list2, w_list3), self.space.w_False) + def test_ne(self): w = self.space.wrap @@ -312,6 +312,7 @@ self.space.w_False) assert self.space.eq_w(self.space.ne(w_list2, w_list3), self.space.w_True) + def test_lt(self): w = self.space.wrap @@ -429,6 +430,7 @@ with py.test.raises(ValueError): intlist.find(w(4), 0, 2) + class AppTestW_ListObject(object): def setup_class(cls): import platform @@ -662,7 +664,6 @@ raises(IndexError, "l[1]") def test_setitem(self): - l = [] raises(IndexError, "l[1] = 2") @@ -861,7 +862,6 @@ raises(TypeError, "[0]*MyInt(3)") raises(TypeError, "[0]*MyIndex(MyInt(3))") - def test_index(self): c = range(10) assert c.index(0) == 0 @@ -1318,6 +1318,8 @@ assert ([5] >= [N]) is False def test_resizelist_hint(self): + if self.on_cpython: + skip('pypy-only test') import __pypy__ l2 = [] __pypy__.resizelist_hint(l2, 100) @@ -1326,6 +1328,8 @@ assert len(l1) == 0 def test_use_method_for_wrong_object(self): + if self.on_cpython: + skip('pypy-only test') raises(TypeError, list.append.im_func, 1, 2) def test_ne_NotImplemented(self): @@ -1439,7 +1443,20 @@ def test_getitem(self): l = range(5) - raises(IndexError, "l[-10]") + raises(IndexError, "l[-6]") + raises(IndexError, "l[5]") + assert l[0] == 0 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-5] == 0 + + l = range(1, 5) + raises(IndexError, "l[-5]") + raises(IndexError, "l[4]") + assert l[0] == 1 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-4] == 1 def test_append(self): l = range(5) @@ -1515,6 +1532,7 @@ notshared = l[:] assert notshared == [] + class AppTestListFastSubscr: spaceconfig = {"objspace.std.optimized_list_getitem": True} From noreply at buildbot.pypy.org Wed Mar 5 03:13:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 03:13:22 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140305021322.5BACA1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69696:19c21c84e468 Date: 2014-03-04 20:57 -0500 http://bitbucket.org/pypy/pypy/changeset/19c21c84e468/ Log: cleanups diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -1,4 +1,3 @@ - """ Interpreter-level implementation of structure, exposing ll-structure to app-level with apropriate interface """ @@ -20,6 +19,7 @@ from rpython.rlib.rarithmetic import intmask, signedtype, widen from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong + def unpack_fields(space, w_fields): fields_w = space.unpackiterable(w_fields) fields = [] diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py --- a/pypy/module/gc/__init__.py +++ b/pypy/module/gc/__init__.py @@ -1,5 +1,6 @@ from pypy.interpreter.mixedmodule import MixedModule - + + class Module(MixedModule): interpleveldefs = { 'collect': 'interp_gc.collect', @@ -8,15 +9,14 @@ 'isenabled': 'interp_gc.isenabled', 'enable_finalizers': 'interp_gc.enable_finalizers', 'disable_finalizers': 'interp_gc.disable_finalizers', - 'garbage' : 'space.newlist([])', + 'garbage': 'space.newlist([])', #'dump_heap_stats': 'interp_gc.dump_heap_stats', } - appleveldefs = { - } + appleveldefs = {} def __init__(self, space, w_name): if (not space.config.translating or - space.config.translation.gctransformer == "framework"): + 
space.config.translation.gctransformer == "framework"): self.appleveldefs.update({ 'dump_rpy_heap': 'app_referents.dump_rpy_heap', }) diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -1,4 +1,3 @@ - """ This file makes open() and friends RPython. Note that RFile should not be used directly and instead it's magically appearing each time you call python builtin open() @@ -17,27 +16,27 @@ def llexternal(*args, **kwargs): return rffi.llexternal(*args, compilation_info=eci, **kwargs) -FILE = lltype.Struct('FILE') # opaque type maybe - class CConfig(object): _compilation_info_ = eci off_t = platform.SimpleType('off_t') +config = platform.configure(CConfig) -CC = platform.configure(CConfig) -OFF_T = CC['off_t'] +OFF_T = config['off_t'] +FILE = lltype.Struct('FILE') # opaque type maybe + c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - lltype.Ptr(FILE)], rffi.SIZE_T) + lltype.Ptr(FILE)], rffi.SIZE_T) c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - lltype.Ptr(FILE)], rffi.SIZE_T) + lltype.Ptr(FILE)], rffi.SIZE_T) c_feof = llexternal('feof', [lltype.Ptr(FILE)], rffi.INT) c_ferror = llexternal('ferror', [lltype.Ptr(FILE)], rffi.INT) c_clearerror = llexternal('clearerr', [lltype.Ptr(FILE)], lltype.Void) c_fseek = llexternal('fseek', [lltype.Ptr(FILE), rffi.LONG, rffi.INT], - rffi.INT) + rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) @@ -53,6 +52,13 @@ BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 + +def _error(ll_file): + errno = c_ferror(ll_file) + c_clearerror(ll_file) + raise OSError(errno, os.strerror(errno)) + + def create_file(filename, mode="r", buffering=-1): assert buffering == -1 assert filename is not None @@ -71,6 +77,7 @@ lltype.free(ll_name, flavor='raw') return RFile(ll_f) + def create_temp_rfile(): res = c_tmpfile() if not res: @@ -78,6 +85,7 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) + def create_popen_file(command, type): ll_command = rffi.str2charp(command) try: @@ -93,6 +101,7 @@ lltype.free(ll_command, flavor='raw') return RPopenFile(ll_f) + class RFile(object): def __init__(self, ll_file): self.ll_file = ll_file @@ -224,7 +233,7 @@ while raw_buf[strlen] != '\0': strlen += 1 if (strlen == BASE_LINE_SIZE - 1 and - raw_buf[BASE_LINE_SIZE - 2] != '\n'): + raw_buf[BASE_LINE_SIZE - 2] != '\n'): return -1 # overflow! 
# common case return strlen @@ -255,9 +264,3 @@ class RPopenFile(RFile): _do_close = staticmethod(c_pclose) - - -def _error(ll_file): - errno = c_ferror(ll_file) - c_clearerror(ll_file) - raise OSError(errno, os.strerror(errno)) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -7,6 +7,7 @@ from rpython.rlib import jit from rpython.translator.platform import platform + class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() # on top of CPython @@ -20,6 +21,7 @@ def __setitem__(self, index, value): assert index == 0 ll2ctypes.TLS.errno = value + if os.name == 'nt': if platform.name == 'msvc': includes=['errno.h','stdio.h'] diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -1,4 +1,3 @@ - from rpython.annotator import model as annmodel from rpython.flowspace.model import Constant from rpython.rlib import rarithmetic, objectmodel diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -198,12 +198,14 @@ """ for attr in ['_includes_', '_libraries_', '_sources_', '_library_dirs_', '_include_dirs_', '_header_']: - assert not hasattr(CConfig, attr), "Found legacy attribute %s on CConfig" % (attr,) + assert not hasattr(CConfig, attr), \ + "Found legacy attribute %s on CConfig" % attr + entries = [] for key in dir(CConfig): value = getattr(CConfig, key) if isinstance(value, CConfigEntry): - entries.append((key, value)) + entries.append((key, value)) if entries: # can be empty if there are only CConfigSingleEntries writer = _CWriter(CConfig) From noreply at buildbot.pypy.org Wed Mar 5 03:44:54 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Mar 2014 03:44:54 +0100 (CET) Subject: [pypy-commit] pypy default: cpython issue1811: improve truediv accuracy on larger ints Message-ID: <20140305024454.DB8671D2692@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69697:339ee58d7163 Date: 2014-03-04 18:42 -0800 http://bitbucket.org/pypy/pypy/changeset/339ee58d7163/ Log: cpython issue1811: improve truediv accuracy on larger ints diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -13,6 +13,7 @@ from rpython.rlib.rarithmetic import ( LONG_BIT, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint +from rpython.rlib.rfloat import DBL_MANT_DIG from rpython.rlib.rstring import ( InvalidBaseError, ParseStringError, ParseStringOverflowError) from rpython.tool.sourcetools import func_renamer, func_with_new_name @@ -163,10 +164,18 @@ def _truediv(space, x, y): + if not y: + raise oefmt(space.w_ZeroDivisionError, "division by zero") + + if (DBL_MANT_DIG < LONG_BIT and + (r_uint(abs(x)) >> DBL_MANT_DIG or r_uint(abs(y)) >> DBL_MANT_DIG)): + # large x or y, use long arithmetic + raise OverflowError + + # both ints can be exactly represented as doubles, do a + # floating-point division a = float(x) b = float(y) - if b == 0.0: - raise oefmt(space.w_ZeroDivisionError, "division by zero") return space.wrap(a / b) @@ -589,7 +598,7 @@ descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) descr_div, descr_rdiv = _make_descr_binop(_div) - descr_truediv, descr_rtruediv = _make_descr_binop(_truediv, ovf=False) + 
descr_truediv, descr_rtruediv = _make_descr_binop(_truediv) descr_mod, descr_rmod = _make_descr_binop(_mod) descr_divmod, descr_rdivmod = _make_descr_binop( _divmod, ovf2small=_divmod_ovf2small) diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -563,6 +563,25 @@ base = MyInt(24) assert int('10', base) == 24 + def test_truediv(self): + import operator + x = 1000000 + a = x / 2 + assert a == 500000 + a = operator.truediv(x, 2) + assert a == 500000.0 + + x = 63050394783186940 + a = x / 7 + assert a == 9007199254740991 + a = operator.truediv(x, 7) + assert a == 9007199254740991.0 + exec("from __future__ import division; " + "a = x / 7; b = operator.truediv(x, 7)") + assert a == 9007199254740991.0 + assert b == 9007199254740991.0 + + class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} From noreply at buildbot.pypy.org Wed Mar 5 03:44:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Mar 2014 03:44:56 +0100 (CET) Subject: [pypy-commit] pypy py3k: hack around appdirect failures Message-ID: <20140305024456.527491D2692@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69698:b2e2ac51eb19 Date: 2014-03-04 11:59 -0800 http://bitbucket.org/pypy/pypy/changeset/b2e2ac51eb19/ Log: hack around appdirect failures diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -43,12 +43,11 @@ class AppTestLong: - def setup_class(cls): - from pypy.interpreter import gateway - from pypy.objspace.std.longobject import W_LongObject - def w__long(space, w_obj): - return W_LongObject.fromint(space, space.int_w(w_obj)) - cls.w__long = cls.space.wrap(gateway.interp2app(w__long)) + def w__long(self, obj): + import sys + # XXX: currently returns a W_LongObject but might return + # W_IntObject in the future + return obj + sys.maxsize - sys.maxsize def test_trunc(self): import math @@ -401,6 +400,8 @@ def test_large_identity(self): import sys + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only') a = sys.maxsize + 1 b = sys.maxsize + 2 assert a is not b From noreply at buildbot.pypy.org Wed Mar 5 03:57:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 03:57:53 +0100 (CET) Subject: [pypy-commit] pypy default: speed up test_select by defining socket buffer sizes Message-ID: <20140305025753.6018A1D2692@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69699:a4ea282a125e Date: 2014-03-04 21:53 -0500 http://bitbucket.org/pypy/pypy/changeset/a4ea282a125e/ Log: speed up test_select by defining socket buffer sizes diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -245,7 +245,8 @@ class AppTestSelectWithSockets(_AppTestSelect): """Same tests with connected sockets. socket.socketpair() does not exists on win32, - so we start our own server.""" + so we start our own server. 
+ """ spaceconfig = { "usemodules": ["select", "_socket", "rctime", "thread"], } @@ -267,7 +268,7 @@ except OperationError, e: # should get a "Permission denied" if not e.match(space, space.getattr(w_socketmod, space.wrap("error"))): raise - print e + print e.errorstr(space) except cls.w_sock_err, e: # should get a "Permission denied" print e else: @@ -283,4 +284,8 @@ thread.start_new_thread(s2.connect, (self.sockaddress,)) s1, addr2 = self.sock.accept() + # speed up the tests that want to fill the buffers + s1.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 4096) + s2.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 4096) + return s1, s2 From noreply at buildbot.pypy.org Wed Mar 5 05:39:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 05:39:03 +0100 (CET) Subject: [pypy-commit] pypy default: update test_pypy_c for SimpleRangeList Message-ID: <20140305043903.7A6F81C0483@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69700:5115c7557c07 Date: 2014-03-04 22:34 -0500 http://bitbucket.org/pypy/pypy/changeset/5115c7557c07/ Log: update test_pypy_c for SimpleRangeList diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -2,7 +2,7 @@ class TestGenerators(BaseTestPyPyC): - def test_simple_generator(self): + def test_simple_generator1(self): def main(n): def f(): for i in range(10000): @@ -28,7 +28,36 @@ jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + i2 = int_sub(i1, 42) + """) + + def test_simple_generator2(self): + def main(n): + def f(): + for i in range(1, 10000): + i -= 1 + i -= 42 # ID: subtract + yield i + + def g(): + for i in f(): # ID: generator + pass + + g() + + log = self.run(main, [500]) + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id("generator", """ + cond_call(..., descr=...) + i16 = force_token() + p45 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend + jump(..., descr=...) + """) + assert loop.match_by_id("subtract", """ + setfield_gc(p7, 38, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -160,7 +160,7 @@ jump(..., descr=...) """) - def test_range_iter(self): + def test_range_iter_simple(self): def main(n): def g(n): return range(n) @@ -178,6 +178,36 @@ guard_not_invalidated? i16 = int_ge(i11, i12) guard_false(i16, descr=...) + i20 = int_add(i11, 1) + i21 = force_token() + setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) + guard_not_invalidated? + i25 = int_ge(i11, i9) + guard_false(i25, descr=...) + i27 = int_add_ovf(i7, i11) + guard_no_overflow(descr=...) + --TICK-- + jump(..., descr=...) 
+ """) + + def test_range_iter_normal(self): + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(1, n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + # + log = self.run(main, [1000]) + assert log.result == 1000 * 999 / 2 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + guard_not_invalidated? + i16 = int_ge(i11, i12) + guard_false(i16, descr=...) i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -45,7 +45,6 @@ storage = strategy.erase(None) elif start == 0 and step == 1: strategy = space.fromcache(SimpleRangeListStrategy) - assert length > 0 storage = strategy.erase((length,)) else: strategy = space.fromcache(RangeListStrategy) @@ -1126,7 +1125,6 @@ def _getitem_unwrapped(self, w_list, i): length = self.unerase(w_list.lstorage)[0] - assert length > 0 if i < 0: i += length if i < 0: From noreply at buildbot.pypy.org Wed Mar 5 05:39:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 05:39:05 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: merge default Message-ID: <20140305043905.373161C0483@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69701:204b726bdfe3 Date: 2014-03-04 22:50 -0500 http://bitbucket.org/pypy/pypy/changeset/204b726bdfe3/ Log: merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -95,5 +95,10 @@ .. branch: test-58c3d8552833 Fix for getarrayitem_gc_pure optimization +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. + .. branch: stdlib-2.7.5 .. 
branch: vendor/stdlib diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -57,6 +57,8 @@ l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) + assert list_strategy(l) == "simple_range" + l = range(1, 2) assert list_strategy(l) == "range" l = [1, "b", 3] assert list_strategy(l) == "object" diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py --- a/pypy/module/_pypyjson/targetjson.py +++ b/pypy/module/_pypyjson/targetjson.py @@ -4,12 +4,10 @@ sys.path.insert(0, str(ROOT)) import time -from rpython.rlib.streamio import open_file_as_stream from pypy.interpreter.error import OperationError from pypy.module._pypyjson.interp_decoder import loads - ## MSG = open('msg.json').read() class W_Root(object): @@ -92,7 +90,7 @@ def wrapfloat(self, x): return W_Float(x) - + def wrap(self, x): if isinstance(x, int): return W_Int(x) @@ -109,7 +107,6 @@ def myloads(msg): return loads(fakespace, W_String(msg)) - def bench(title, N, fn, arg): a = time.clock() @@ -124,14 +121,14 @@ return 1 filename = argv[1] N = int(argv[2]) - f = open_file_as_stream(filename) - msg = f.readall() - + f = open(filename) + msg = f.read() + try: bench('loads ', N, myloads, msg) except OperationError, e: print 'Error', e._compute_value(fakespace) - + return 0 # _____ Define and setup target ___ diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -20,6 +20,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi + def unpack_fields(space, w_fields): fields_w = space.unpackiterable(w_fields) fields = [] diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py --- a/pypy/module/gc/__init__.py +++ b/pypy/module/gc/__init__.py @@ -1,5 +1,6 @@ from pypy.interpreter.mixedmodule import MixedModule - + + class Module(MixedModule): interpleveldefs = { 'collect': 'interp_gc.collect', @@ -8,15 +9,14 @@ 'isenabled': 'interp_gc.isenabled', 'enable_finalizers': 'interp_gc.enable_finalizers', 'disable_finalizers': 'interp_gc.disable_finalizers', - 'garbage' : 'space.newlist([])', + 'garbage': 'space.newlist([])', #'dump_heap_stats': 'interp_gc.dump_heap_stats', } - appleveldefs = { - } + appleveldefs = {} def __init__(self, space, w_name): if (not space.config.translating or - space.config.translation.gctransformer == "framework"): + space.config.translation.gctransformer == "framework"): self.appleveldefs.update({ 'dump_rpy_heap': 'app_referents.dump_rpy_heap', }) diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError from rpython.rlib import rgc -from rpython.rlib.streamio import open_file_as_stream + @unwrap_spec(generation=int) def collect(space, generation=0): @@ -56,7 +56,7 @@ if not tb: raise OperationError(space.w_RuntimeError, space.wrap("Wrong GC")) - f = open_file_as_stream(filename, mode="w") + f = open(filename, mode="w") for i in range(len(tb)): f.write("%d %d " % (tb[i].count, tb[i].size)) f.write(",".join([str(tb[i].links[j]) for j in range(len(tb))]) + "\n") diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -1,5 +1,6 @@ 
import py + class AppTestGC(object): def test_collect(self): import gc @@ -69,6 +70,7 @@ gc.enable() assert gc.isenabled() + class AppTestGcDumpHeap(object): pytestmark = py.test.mark.xfail(run=False) @@ -81,10 +83,10 @@ self.count = count self.size = size self.links = links - + def fake_heap_stats(): return [X(1, 12, [0, 0]), X(2, 10, [10, 0])] - + cls._heap_stats = rgc._heap_stats rgc._heap_stats = fake_heap_stats fname = udir.join('gcdump.log') @@ -94,10 +96,10 @@ def teardown_class(cls): import py from rpython.rlib import rgc - + rgc._heap_stats = cls._heap_stats assert py.path.local(cls._fname).read() == '1 12 0,0\n2 10 10,0\n' - + def test_gc_heap_stats(self): import gc gc.dump_heap_stats(self.fname) @@ -124,6 +126,7 @@ for r in rlist: assert r() is None + class AppTestGcMapDictIndexCache(AppTestGcMethodCache): spaceconfig = {"objspace.std.withmethodcache": True, "objspace.std.withmapdict": True} diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -2,7 +2,7 @@ class TestGenerators(BaseTestPyPyC): - def test_simple_generator(self): + def test_simple_generator1(self): def main(n): def f(): for i in range(10000): @@ -28,7 +28,36 @@ jump(..., descr=...) """) assert loop.match_by_id("subtract", """ - setfield_gc(p7, 35, descr=<.*last_instr .*>) # XXX bad, kill me + i2 = int_sub(i1, 42) + """) + + def test_simple_generator2(self): + def main(n): + def f(): + for i in range(1, 10000): + i -= 1 + i -= 42 # ID: subtract + yield i + + def g(): + for i in f(): # ID: generator + pass + + g() + + log = self.run(main, [500]) + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id("generator", """ + cond_call(..., descr=...) + i16 = force_token() + p45 = new_with_vtable(ConstClass(W_IntObject)) + setfield_gc(p45, i29, descr=) + setarrayitem_gc(p8, 0, p45, descr=) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend + jump(..., descr=...) + """) + assert loop.match_by_id("subtract", """ + setfield_gc(p7, 38, descr=<.*last_instr .*>) # XXX bad, kill me i2 = int_sub_ovf(i1, 42) guard_no_overflow(descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -160,7 +160,7 @@ jump(..., descr=...) """) - def test_range_iter(self): + def test_range_iter_simple(self): def main(n): def g(n): return range(n) @@ -178,6 +178,36 @@ guard_not_invalidated? i16 = int_ge(i11, i12) guard_false(i16, descr=...) + i20 = int_add(i11, 1) + i21 = force_token() + setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) + guard_not_invalidated? + i25 = int_ge(i11, i9) + guard_false(i25, descr=...) + i27 = int_add_ovf(i7, i11) + guard_no_overflow(descr=...) + --TICK-- + jump(..., descr=...) + """) + + def test_range_iter_normal(self): + def main(n): + def g(n): + return range(n) + s = 0 + for i in range(1, n): # ID: for + tmp = g(n) + s += tmp[i] # ID: getitem + a = 0 + return s + # + log = self.run(main, [1000]) + assert log.result == 1000 * 999 / 2 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + guard_not_invalidated? + i16 = int_ge(i11, i12) + guard_false(i16, descr=...) 
i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -293,7 +293,8 @@ class AppTestSelectWithSockets(_AppTestSelect): """Same tests with connected sockets. socket.socketpair() does not exists on win32, - so we start our own server.""" + so we start our own server. + """ spaceconfig = { "usemodules": ["select", "_socket", "rctime", "thread"], } @@ -315,7 +316,7 @@ except OperationError, e: # should get a "Permission denied" if not e.match(space, space.getattr(w_socketmod, space.wrap("error"))): raise - print e + print e.errorstr(space) except cls.w_sock_err, e: # should get a "Permission denied" print e else: @@ -331,4 +332,8 @@ thread.start_new_thread(s2.connect, (self.sockaddress,)) s1, addr2 = self.sock.accept() + # speed up the tests that want to fill the buffers + s1.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 4096) + s2.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 4096) + return s1, s2 diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -13,6 +13,7 @@ from rpython.rlib.rarithmetic import ( LONG_BIT, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint +from rpython.rlib.rfloat import DBL_MANT_DIG from rpython.rlib.rstring import ( InvalidBaseError, ParseStringError, ParseStringOverflowError) from rpython.tool.sourcetools import func_renamer, func_with_new_name @@ -163,10 +164,18 @@ def _truediv(space, x, y): + if not y: + raise oefmt(space.w_ZeroDivisionError, "division by zero") + + if (DBL_MANT_DIG < LONG_BIT and + (r_uint(abs(x)) >> DBL_MANT_DIG or r_uint(abs(y)) >> DBL_MANT_DIG)): + # large x or y, use long arithmetic + raise OverflowError + + # both ints can be exactly represented as doubles, do a + # floating-point division a = float(x) b = float(y) - if b == 0.0: - raise oefmt(space.w_ZeroDivisionError, "division by zero") return space.wrap(a / b) @@ -589,7 +598,7 @@ descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) descr_div, descr_rdiv = _make_descr_binop(_div) - descr_truediv, descr_rtruediv = _make_descr_binop(_truediv, ovf=False) + descr_truediv, descr_rtruediv = _make_descr_binop(_truediv) descr_mod, descr_rmod = _make_descr_binop(_mod) descr_divmod, descr_rdivmod = _make_descr_binop( _divmod, ovf2small=_divmod_ovf2small) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -43,6 +43,9 @@ if length <= 0: strategy = space.fromcache(EmptyListStrategy) storage = strategy.erase(None) + elif start == 0 and step == 1: + strategy = space.fromcache(SimpleRangeListStrategy) + storage = strategy.erase((length,)) else: strategy = space.fromcache(RangeListStrategy) storage = strategy.erase((start, step, length)) @@ -138,7 +141,6 @@ class W_ListObject(W_Root): - strategy = None def __init__(self, space, wrappeditems, sizehint=-1): @@ -999,15 +1001,7 @@ self.sizehint = hint -class RangeListStrategy(ListStrategy): - """RangeListStrategy is used when a list is created using the range method. - The storage is a tuple containing only three integers start, step and - length and elements are calculated based on these values. 
On any operation - destroying the range (inserting, appending non-ints) the strategy is - switched to IntegerListStrategy.""" - - _applevel_repr = "range" - +class BaseRangeListStrategy(ListStrategy): def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) strategy = w_list.strategy = self.space.fromcache(IntegerListStrategy) @@ -1022,10 +1016,6 @@ def init_from_list_w(self, w_list, list_w): raise NotImplementedError - erase, unerase = rerased.new_erasing_pair("range") - erase = staticmethod(erase) - unerase = staticmethod(unerase) - def clone(self, w_list): storage = w_list.lstorage # lstorage is tuple, no need to clone w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, @@ -1040,6 +1030,157 @@ w_other.strategy = self w_other.lstorage = w_list.lstorage + def getitem(self, w_list, i): + return self.wrap(self._getitem_unwrapped(w_list, i)) + + def getitems_int(self, w_list): + return self._getitems_range(w_list, False) + + def getitems_copy(self, w_list): + return self._getitems_range(w_list, True) + + def getstorage_copy(self, w_list): + # tuple is immutable + return w_list.lstorage + + @jit.dont_look_inside + def getitems_fixedsize(self, w_list): + return self._getitems_range_unroll(w_list, True) + + def getitems_unroll(self, w_list): + return self._getitems_range_unroll(w_list, True) + + def getslice(self, w_list, start, stop, step, length): + self.switch_to_integer_strategy(w_list) + return w_list.getslice(start, stop, step, length) + + def append(self, w_list, w_item): + if type(w_item) is W_IntObject: + self.switch_to_integer_strategy(w_list) + else: + w_list.switch_to_object_strategy() + w_list.append(w_item) + + def inplace_mul(self, w_list, times): + self.switch_to_integer_strategy(w_list) + w_list.inplace_mul(times) + + def deleteslice(self, w_list, start, step, slicelength): + self.switch_to_integer_strategy(w_list) + w_list.deleteslice(start, step, slicelength) + + def setitem(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.setitem(index, w_item) + + def setslice(self, w_list, start, step, slicelength, sequence_w): + self.switch_to_integer_strategy(w_list) + w_list.setslice(start, step, slicelength, sequence_w) + + def insert(self, w_list, index, w_item): + self.switch_to_integer_strategy(w_list) + w_list.insert(index, w_item) + + def extend(self, w_list, w_any): + self.switch_to_integer_strategy(w_list) + w_list.extend(w_any) + + def reverse(self, w_list): + self.switch_to_integer_strategy(w_list) + w_list.reverse() + + def sort(self, w_list, reverse): + step = self.step(w_list) + if step > 0 and reverse or step < 0 and not reverse: + self.switch_to_integer_strategy(w_list) + w_list.sort(reverse) + + +class SimpleRangeListStrategy(BaseRangeListStrategy): + """SimpleRangeListStrategy is used when a list is created using the range + method providing only positive length. 
The storage is a one element tuple + with positive integer storing length.""" + + _applevel_repr = "simple_range" + + erase, unerase = rerased.new_erasing_pair("simple_range") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def find(self, w_list, w_obj, startindex, stopindex): + if type(w_obj) is W_IntObject: + obj = self.unwrap(w_obj) + length = self.unerase(w_list.lstorage)[0] + if 0 <= obj < length and startindex <= obj < stopindex: + return obj + else: + raise ValueError + return ListStrategy.find(self, w_list, w_obj, startindex, stopindex) + + def length(self, w_list): + return self.unerase(w_list.lstorage)[0] + + def step(self, w_list): + return 1 + + def _getitem_unwrapped(self, w_list, i): + length = self.unerase(w_list.lstorage)[0] + if i < 0: + i += length + if i < 0: + raise IndexError + elif i >= length: + raise IndexError + return i + + @specialize.arg(2) + def _getitems_range(self, w_list, wrap_items): + length = self.unerase(w_list.lstorage)[0] + if wrap_items: + r = [None] * length + else: + r = [0] * length + i = 0 + while i < length: + if wrap_items: + r[i] = self.wrap(i) + else: + r[i] = i + i += 1 + + return r + + _getitems_range_unroll = jit.unroll_safe( + func_with_new_name(_getitems_range, "_getitems_range_unroll")) + + def pop_end(self, w_list): + new_length = self.unerase(w_list.lstorage)[0] - 1 + w_result = self.wrap(new_length) + if new_length > 0: + w_list.lstorage = self.erase((new_length,)) + else: + strategy = w_list.strategy = self.space.fromcache(EmptyListStrategy) + w_list.lstorage = strategy.erase(None) + return w_result + + def pop(self, w_list, index): + self.switch_to_integer_strategy(w_list) + return w_list.pop(index) + + +class RangeListStrategy(BaseRangeListStrategy): + """RangeListStrategy is used when a list is created using the range method. + The storage is a tuple containing only three integers start, step and + length and elements are calculated based on these values. 
On any operation + destroying the range (inserting, appending non-ints) the strategy is + switched to IntegerListStrategy.""" + + _applevel_repr = "range" + + erase, unerase = rerased.new_erasing_pair("range") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + def find(self, w_list, w_obj, startindex, stopindex): if type(w_obj) is W_IntObject: obj = self.unwrap(w_obj) @@ -1059,6 +1200,9 @@ def length(self, w_list): return self.unerase(w_list.lstorage)[2] + def step(self, w_list): + return self.unerase(w_list.lstorage)[1] + def _getitem_unwrapped(self, w_list, i): v = self.unerase(w_list.lstorage) start = v[0] @@ -1072,19 +1216,6 @@ raise IndexError return start + i * step - def getitems_int(self, w_list): - return self._getitems_range(w_list, False) - - def getitem(self, w_list, i): - return self.wrap(self._getitem_unwrapped(w_list, i)) - - def getitems_copy(self, w_list): - return self._getitems_range(w_list, True) - - def getstorage_copy(self, w_list): - # tuple is unmutable - return w_list.lstorage - @specialize.arg(2) def _getitems_range(self, w_list, wrap_items): l = self.unerase(w_list.lstorage) @@ -1107,34 +1238,9 @@ return r - @jit.dont_look_inside - def getitems_fixedsize(self, w_list): - return self._getitems_range_unroll(w_list, True) - - def getitems_unroll(self, w_list): - return self._getitems_range_unroll(w_list, True) _getitems_range_unroll = jit.unroll_safe( func_with_new_name(_getitems_range, "_getitems_range_unroll")) - def getslice(self, w_list, start, stop, step, length): - self.switch_to_integer_strategy(w_list) - return w_list.getslice(start, stop, step, length) - - def append(self, w_list, w_item): - if type(w_item) is W_IntObject: - self.switch_to_integer_strategy(w_list) - else: - w_list.switch_to_object_strategy() - w_list.append(w_item) - - def inplace_mul(self, w_list, times): - self.switch_to_integer_strategy(w_list) - w_list.inplace_mul(times) - - def deleteslice(self, w_list, start, step, slicelength): - self.switch_to_integer_strategy(w_list) - w_list.deleteslice(start, step, slicelength) - def pop_end(self, w_list): start, step, length = self.unerase(w_list.lstorage) w_result = self.wrap(start + (length - 1) * step) @@ -1158,32 +1264,6 @@ self.switch_to_integer_strategy(w_list) return w_list.pop(index) - def setitem(self, w_list, index, w_item): - self.switch_to_integer_strategy(w_list) - w_list.setitem(index, w_item) - - def setslice(self, w_list, start, step, slicelength, sequence_w): - self.switch_to_integer_strategy(w_list) - w_list.setslice(start, step, slicelength, sequence_w) - - def sort(self, w_list, reverse): - step = self.unerase(w_list.lstorage)[1] - if step > 0 and reverse or step < 0 and not reverse: - self.switch_to_integer_strategy(w_list) - w_list.sort(reverse) - - def insert(self, w_list, index, w_item): - self.switch_to_integer_strategy(w_list) - w_list.insert(index, w_item) - - def extend(self, w_list, w_any): - self.switch_to_integer_strategy(w_list) - w_list.extend(w_any) - - def reverse(self, w_list): - self.switch_to_integer_strategy(w_list) - w_list.reverse() - class AbstractUnwrappedStrategy(object): @@ -1541,7 +1621,7 @@ _base_extend_from_list = _extend_from_list def _extend_from_list(self, w_list, w_other): - if w_other.strategy is self.space.fromcache(RangeListStrategy): + if isinstance(w_other.strategy, BaseRangeListStrategy): l = self.unerase(w_list.lstorage) other = w_other.getitems_int() assert other is not None diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py 
--- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -563,6 +563,25 @@ base = MyInt(24) assert int('10', base) == 24 + def test_truediv(self): + import operator + x = 1000000 + a = x / 2 + assert a == 500000 + a = operator.truediv(x, 2) + assert a == 500000.0 + + x = 63050394783186940 + a = x / 7 + assert a == 9007199254740991 + a = operator.truediv(x, 7) + assert a == 9007199254740991.0 + exec("from __future__ import division; " + "a = x / 7; b = operator.truediv(x, 7)") + assert a == 9007199254740991.0 + assert b == 9007199254740991.0 + + class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -166,7 +166,6 @@ self.space.setitem(w_lhslist, w_slice, w_rhslist) assert self.space.unwrap(w_lhslist) == expected - test1([5,7,1,4], 1, 3, [9,8], [5,9,8,4]) test1([5,7,1,4], 1, 3, [9], [5,9,4]) test1([5,7,1,4], 1, 3, [9,8,6],[5,9,8,6,4]) @@ -294,6 +293,7 @@ self.space.w_True) assert self.space.eq_w(self.space.eq(w_list2, w_list3), self.space.w_False) + def test_ne(self): w = self.space.wrap @@ -312,6 +312,7 @@ self.space.w_False) assert self.space.eq_w(self.space.ne(w_list2, w_list3), self.space.w_True) + def test_lt(self): w = self.space.wrap @@ -429,6 +430,7 @@ with py.test.raises(ValueError): intlist.find(w(4), 0, 2) + class AppTestW_ListObject(object): def setup_class(cls): import platform @@ -662,7 +664,6 @@ raises(IndexError, "l[1]") def test_setitem(self): - l = [] raises(IndexError, "l[1] = 2") @@ -861,7 +862,6 @@ raises(TypeError, "[0]*MyInt(3)") raises(TypeError, "[0]*MyIndex(MyInt(3))") - def test_index(self): c = range(10) assert c.index(0) == 0 @@ -1318,6 +1318,8 @@ assert ([5] >= [N]) is False def test_resizelist_hint(self): + if self.on_cpython: + skip('pypy-only test') import __pypy__ l2 = [] __pypy__.resizelist_hint(l2, 100) @@ -1326,6 +1328,8 @@ assert len(l1) == 0 def test_use_method_for_wrong_object(self): + if self.on_cpython: + skip('pypy-only test') raises(TypeError, list.append.im_func, 1, 2) def test_ne_NotImplemented(self): @@ -1439,7 +1443,20 @@ def test_getitem(self): l = range(5) - raises(IndexError, "l[-10]") + raises(IndexError, "l[-6]") + raises(IndexError, "l[5]") + assert l[0] == 0 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-5] == 0 + + l = range(1, 5) + raises(IndexError, "l[-5]") + raises(IndexError, "l[4]") + assert l[0] == 1 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-4] == 1 def test_append(self): l = range(5) @@ -1515,6 +1532,7 @@ notshared = l[:] assert notshared == [] + class AppTestListFastSubscr: spaceconfig = {"objspace.std.optimized_list_getitem": True} diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,8 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import ( + W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, + FloatListStrategy, BytesListStrategy, RangeListStrategy, + SimpleRangeListStrategy, make_range_list, UnicodeListStrategy) from pypy.objspace.std import 
listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -18,7 +21,7 @@ UnicodeListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, ObjectListStrategy) # mixed unicode and bytes - + def test_empty_to_any(self): space = self.space w = space.wrap @@ -183,7 +186,7 @@ def test_setslice(self): space = self.space w = space.wrap - + l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.setslice(0, 1, 2, W_ListObject(space, [w(1), w(2), w(3)])) @@ -286,7 +289,7 @@ def test_empty_setslice_with_objectlist(self): space = self.space w = space.wrap - + l = W_ListObject(space, []) o = W_ListObject(space, [space.wrap(1), space.wrap("2"), space.wrap(3)]) l.setslice(0, 1, o.length(), o) @@ -347,6 +350,13 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) + r = make_range_list(space, 0, 1, 10) + empty.extend(r) + assert isinstance(empty.strategy, SimpleRangeListStrategy) + assert space.is_true(space.eq(empty.getitem(1), w(1))) + + empty = W_ListObject(space, []) + assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(empty.strategy, IntegerListStrategy) @@ -397,6 +407,72 @@ l.append(self.space.wrap(19)) assert isinstance(l.strategy, IntegerListStrategy) + def test_simplerangelist(self): + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop(5) + assert self.space.eq_w(v, self.space.wrap(5)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop(0) + assert self.space.eq_w(v, self.space.wrap(0)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0, 1, 10) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop_end() + assert self.space.eq_w(v, self.space.wrap(9)) + assert isinstance(l.strategy, SimpleRangeListStrategy) + v = l.pop_end() + assert self.space.eq_w(v, self.space.wrap(8)) + assert isinstance(l.strategy, SimpleRangeListStrategy) + + l = make_range_list(self.space, 0, 1, 5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + l.append(self.space.wrap("string")) + assert isinstance(l.strategy, ObjectListStrategy) + + l = make_range_list(self.space, 0,1,5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + l.append(self.space.wrap(19)) + assert isinstance(l.strategy, IntegerListStrategy) + + l = make_range_list(self.space, 0,1,5) + assert isinstance(l.strategy, SimpleRangeListStrategy) + assert l.find(self.space.wrap(0)) == 0 + assert l.find(self.space.wrap(4)) == 4 + + try: + l.find(self.space.wrap(5)) + except ValueError: + pass + else: + assert False, "Did not raise ValueError" + + try: + l.find(self.space.wrap(0), 5, 6) + except ValueError: + pass + else: + assert False, "Did not raise ValueError" + + assert l.length() == 5 + + l = make_range_list(self.space, 0, 1, 1) + assert self.space.eq_w(l.pop(0), self.space.wrap(0)) + + l = make_range_list(self.space, 0, 1, 10) + l.sort(False) + assert isinstance(l.strategy, SimpleRangeListStrategy) + + assert self.space.eq_w(l.getitem(5), self.space.wrap(5)) + + l = make_range_list(self.space, 0, 1, 1) + assert self.space.eq_w(l.pop_end(), self.space.wrap(0)) + assert isinstance(l.strategy, EmptyListStrategy) + def test_keep_range(self): # simple list l = make_range_list(self.space, 1,1,5) diff --git 
a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -72,28 +72,43 @@ r.sort(key=lambda x: -x) assert r == range(9, -1, -1) def test_pop(self): + # RangeListStrategy + r = range(1, 10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + assert repr(r) == repr(range(1, 9)) + res = r.pop(0) + assert res == 1 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 9)) + res = r.pop(len(r) - 1) + assert res == 8 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 8)) + res = r.pop(2) + assert res == 4 + assert not self.not_forced(r) + assert r == [2, 3, 5, 6, 7] + res = r.pop(2) + assert res == 5 + assert not self.not_forced(r) + assert r == [2, 3, 6, 7] + + # SimpleRangeListStrategy r = range(10) res = r.pop() assert res == 9 assert self.not_forced(r) - assert repr(r) == repr(range(9)) + res = r.pop() + assert res == 8 + assert repr(r) == repr(range(8)) + assert self.not_forced(r) res = r.pop(0) assert res == 0 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 9)) - res = r.pop(len(r) - 1) - assert res == 8 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 8)) - res = r.pop(2) - assert res == 3 assert not self.not_forced(r) - assert r == [1, 2, 4, 5, 6, 7] - res = r.pop(2) - assert res == 4 - assert not self.not_forced(r) - assert r == [1, 2, 5, 6, 7] - + assert r == [1, 2, 3, 4, 5, 6, 7] + def test_reduce(self): it = iter(range(10)) assert it.next() == 0 diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -8,8 +8,6 @@ (gdb) python execfile('/path/to/gdb_pypy.py') """ -from __future__ import with_statement - import re import sys import os.path @@ -120,15 +118,16 @@ """ Returns a mapping offset --> description """ + import tempfile + import zlib vname = 'pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_typeids_z' length = int(self.gdb.parse_and_eval('*(long*)%s' % vname)) vstart = '(char*)(((long*)%s)+1)' % vname - self.gdb.execute('dump binary memory /tmp/typeids.txt.z %s %s+%d' - % (vstart, vstart, length)) - s = open('/tmp/typeids.txt.z', 'rb').read() - import zlib; typeids_txt = zlib.decompress(s) - typeids = TypeIdsMap(typeids_txt.splitlines(True), self.gdb) - return typeids + with tempfile.NamedTemporaryFile('rb') as fobj: + self.gdb.execute('dump binary memory %s %s %s+%d' % + (fobj.name, vstart, vstart, length)) + data = fobj.read() + return TypeIdsMap(zlib.decompress(data).splitlines(True), self.gdb) class TypeIdsMap(object): diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -1,4 +1,3 @@ - """ This file makes open() and friends RPython. 
Note that RFile should not be used directly and instead it's magically appearing each time you call python builtin open() @@ -17,27 +16,27 @@ def llexternal(*args, **kwargs): return rffi.llexternal(*args, compilation_info=eci, **kwargs) -FILE = lltype.Struct('FILE') # opaque type maybe - class CConfig(object): _compilation_info_ = eci off_t = platform.SimpleType('off_t') +config = platform.configure(CConfig) -CC = platform.configure(CConfig) -OFF_T = CC['off_t'] +OFF_T = config['off_t'] +FILE = lltype.Struct('FILE') # opaque type maybe + c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - lltype.Ptr(FILE)], rffi.SIZE_T) + lltype.Ptr(FILE)], rffi.SIZE_T) c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - lltype.Ptr(FILE)], rffi.SIZE_T) + lltype.Ptr(FILE)], rffi.SIZE_T) c_feof = llexternal('feof', [lltype.Ptr(FILE)], rffi.INT) c_ferror = llexternal('ferror', [lltype.Ptr(FILE)], rffi.INT) c_clearerror = llexternal('clearerr', [lltype.Ptr(FILE)], lltype.Void) c_fseek = llexternal('fseek', [lltype.Ptr(FILE), rffi.LONG, rffi.INT], - rffi.INT) + rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) @@ -53,6 +52,13 @@ BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 + +def _error(ll_file): + errno = c_ferror(ll_file) + c_clearerror(ll_file) + raise OSError(errno, os.strerror(errno)) + + def create_file(filename, mode="r", buffering=-1): assert buffering == -1 assert filename is not None @@ -71,6 +77,7 @@ lltype.free(ll_name, flavor='raw') return RFile(ll_f) + def create_temp_rfile(): res = c_tmpfile() if not res: @@ -78,6 +85,7 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) + def create_popen_file(command, type): ll_command = rffi.str2charp(command) try: @@ -93,6 +101,7 @@ lltype.free(ll_command, flavor='raw') return RPopenFile(ll_f) + class RFile(object): def __init__(self, ll_file): self.ll_file = ll_file @@ -224,7 +233,7 @@ while raw_buf[strlen] != '\0': strlen += 1 if (strlen == BASE_LINE_SIZE - 1 and - raw_buf[BASE_LINE_SIZE - 2] != '\n'): + raw_buf[BASE_LINE_SIZE - 2] != '\n'): return -1 # overflow! 
# common case return strlen @@ -255,9 +264,3 @@ class RPopenFile(RFile): _do_close = staticmethod(c_pclose) - - -def _error(ll_file): - errno = c_ferror(ll_file) - c_clearerror(ll_file) - raise OSError(errno, os.strerror(errno)) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -7,6 +7,7 @@ from rpython.rlib import jit from rpython.translator.platform import platform + class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() # on top of CPython @@ -20,6 +21,7 @@ def __setitem__(self, index, value): assert index == 0 ll2ctypes.TLS.errno = value + if os.name == 'nt': if platform.name == 'msvc': includes=['errno.h','stdio.h'] diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -1,4 +1,3 @@ - from rpython.annotator import model as annmodel from rpython.flowspace.model import Constant from rpython.rlib import rarithmetic, objectmodel diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -198,12 +198,14 @@ """ for attr in ['_includes_', '_libraries_', '_sources_', '_library_dirs_', '_include_dirs_', '_header_']: - assert not hasattr(CConfig, attr), "Found legacy attribute %s on CConfig" % (attr,) + assert not hasattr(CConfig, attr), \ + "Found legacy attribute %s on CConfig" % attr + entries = [] for key in dir(CConfig): value = getattr(CConfig, key) if isinstance(value, CConfigEntry): - entries.append((key, value)) + entries.append((key, value)) if entries: # can be empty if there are only CConfigSingleEntries writer = _CWriter(CConfig) From noreply at buildbot.pypy.org Wed Mar 5 05:39:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 05:39:06 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: try to fix this test from occasionally hanging untranslated Message-ID: <20140305043906.74B561C0483@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69702:114c53c1bf8d Date: 2014-03-04 23:13 -0500 http://bitbucket.org/pypy/pypy/changeset/114c53c1bf8d/ Log: try to fix this test from occasionally hanging untranslated diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -258,7 +258,7 @@ return FileAsSocket(s1), FileAsSocket(s2) def test_poll_threaded(self): - import os, select, threading, time + import os, select, thread, time if not hasattr(select, 'poll'): skip("no select.poll() on this platform") r, w = os.pipe() @@ -268,11 +268,9 @@ for fd in rfds: pollster.register(fd, select.POLLIN) - t = threading.Thread(target=pollster.poll) - t.start() + t = thread.start_new_thread(pollster.poll, ()) try: - for i in range(5): - time.sleep(0.1); print '', # print to release GIL untranslated + time.sleep(0.1); print '', # print to release GIL untranslated # trigger ufds array reallocation for fd in rfds: pollster.unregister(fd) @@ -282,7 +280,7 @@ finally: # and make the call to poll() from the thread return os.write(w, b'spam') - t.join() + time.sleep(0.1); print '', # print to release GIL untranslated finally: os.close(r) os.close(w) From noreply at buildbot.pypy.org Wed Mar 5 07:14:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 07:14:07 +0100 (CET) Subject: [pypy-commit] pypy 
stdlib-2.7.6: more hacking at this test Message-ID: <20140305061407.2BCAE1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69703:9f7c1ad59171 Date: 2014-03-05 06:13 +0000 http://bitbucket.org/pypy/pypy/changeset/9f7c1ad59171/ Log: more hacking at this test diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -270,7 +270,8 @@ t = thread.start_new_thread(pollster.poll, ()) try: - time.sleep(0.1); print '', # print to release GIL untranslated + time.sleep(0.1) + for i in range(5): print '', # to release GIL untranslated # trigger ufds array reallocation for fd in rfds: pollster.unregister(fd) @@ -280,7 +281,8 @@ finally: # and make the call to poll() from the thread return os.write(w, b'spam') - time.sleep(0.1); print '', # print to release GIL untranslated + time.sleep(0.1) + for i in range(5): print '', # to release GIL untranslated finally: os.close(r) os.close(w) From noreply at buildbot.pypy.org Wed Mar 5 07:44:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 07:44:26 +0100 (CET) Subject: [pypy-commit] stmgc default: Missing one potential root inside the major gc tracing. Message-ID: <20140305064426.CB89A1C0632@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r954:5e4ec1af0e0c Date: 2014-03-05 07:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/5e4ec1af0e0c/ Log: Missing one potential root inside the major gc tracing. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -405,6 +405,14 @@ tl = tl->next; } while (tl != stm_all_thread_locals); } + + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + if (get_priv_segment(i)->transaction_state != TS_NONE) + mark_visit_object( + get_priv_segment(i)->threadlocal_at_start_of_transaction, + get_segment_base(i)); + } } static void mark_visit_from_modified_objects(void) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -14,6 +14,7 @@ typedef struct { object_t **shadowstack, **shadowstack_base; + object_t *thread_local_obj; int associated_segment_num; ...; } stm_thread_local_t; @@ -465,3 +466,11 @@ addr = lib._stm_get_segment_base(i) content = addr[int(ffi.cast("uintptr_t", obj)) + offset] assert content == expected_content + + def get_thread_local_obj(self): + tl = self.tls[self.current_thread] + return tl.thread_local_obj + + def set_thread_local_obj(self, newobj): + tl = self.tls[self.current_thread] + tl.thread_local_obj = newobj diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -207,3 +207,25 @@ def test_reshare_if_no_longer_modified_1(self): self.test_reshare_if_no_longer_modified_0(invert=1) + + def test_threadlocal_at_start_of_transaction(self): + self.start_transaction() + x = stm_allocate(16) + stm_set_char(x, 'L') + self.set_thread_local_obj(x) + self.commit_transaction() + + self.start_transaction() + assert stm_get_char(self.get_thread_local_obj()) == 'L' + self.set_thread_local_obj(stm_allocate(32)) + stm_minor_collect() + self.abort_transaction() + + self.start_transaction() + assert stm_get_char(self.get_thread_local_obj()) == 'L' + self.set_thread_local_obj(stm_allocate(32)) + stm_major_collect() + self.abort_transaction() + + self.start_transaction() + assert stm_get_char(self.get_thread_local_obj()) == 'L' From noreply at buildbot.pypy.org Wed Mar 5 
09:20:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 09:20:42 +0100 (CET) Subject: [pypy-commit] cffi default: Clean up 'sflags'. It was failing because suddenly passing a value Message-ID: <20140305082042.4D2CA1C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1471:9cdae12e006f Date: 2014-03-05 09:19 +0100 http://bitbucket.org/cffi/cffi/changeset/9cdae12e006f/ Log: Clean up 'sflags'. It was failing because suddenly passing a value of 0 as argument was not equivalent to not passing any argument at all. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3583,10 +3583,40 @@ return cf; /* borrowed reference */ } -#define SF_MSVC_BITFIELDS 1 -#define SF_GCC_ARM_BITFIELDS 2 -#define SF_GCC_BIG_ENDIAN 4 -#define SF_PACKED 8 +#define SF_MSVC_BITFIELDS 0x01 +#define SF_GCC_ARM_BITFIELDS 0x02 +#define SF_GCC_X86_BITFIELDS 0x10 + +#define SF_GCC_BIG_ENDIAN 0x04 +#define SF_GCC_LITTLE_ENDIAN 0x40 + +#define SF_PACKED 0x08 + +static int complete_sflags(int sflags) +{ + /* add one of the SF_xxx_BITFIELDS flags if none is specified */ + if (!(sflags & (SF_MSVC_BITFIELDS | SF_GCC_ARM_BITFIELDS | + SF_GCC_X86_BITFIELDS))) { +#ifdef MS_WIN32 + sflags |= SF_MSVC_BITFIELDS; +#else +# ifdef __arm__ + sflags |= SF_GCC_ARM_BITFIELDS; +# else + sflags |= SF_GCC_X86_BITFIELDS; +# endif +#endif + } + /* add one of SF_GCC_xx_ENDIAN if none is specified */ + if (!(sflags & (SF_GCC_BIG_ENDIAN | SF_GCC_LITTLE_ENDIAN))) { + int _check_endian = 1; + if (*(char *)&_check_endian == 0) + sflags |= SF_GCC_BIG_ENDIAN; + else + sflags |= SF_GCC_LITTLE_ENDIAN; + } + return sflags; +} static PyObject *b_complete_struct_or_union(PyObject *self, PyObject *args) { @@ -3598,18 +3628,7 @@ int totalalignment = -1; CFieldObject **previous; int prev_bitfield_size, prev_bitfield_free; -#ifdef MS_WIN32 - int sflags = SF_MSVC_BITFIELDS; -#else -# ifdef __arm__ - int sflags = SF_GCC_ARM_BITFIELDS; -# else int sflags = 0; -# endif - int _check_endian = 1; - if (*(char *)&_check_endian == 0) - sflags |= SF_GCC_BIG_ENDIAN; -#endif if (!PyArg_ParseTuple(args, "O!O!|Onii:complete_struct_or_union", &CTypeDescr_Type, &ct, @@ -3617,6 +3636,8 @@ &ignored, &totalsize, &totalalignment, &sflags)) return NULL; + sflags = complete_sflags(sflags); + if ((ct->ct_flags & (CT_STRUCT|CT_IS_OPAQUE)) == (CT_STRUCT|CT_IS_OPAQUE)) { is_union = 0; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2897,7 +2897,7 @@ ('b1', BInt, 9), ('b2', BUInt, 7), ('c', BChar, -1)], -1, -1, -1, flag) - if flag % 2 == 0: # gcc, any variant + if not (flag & SF_MSVC_BITFIELDS): # gcc, any variant assert typeoffsetof(BStruct, 'c') == (BChar, 3) assert sizeof(BStruct) == 4 else: # msvc @@ -2912,20 +2912,20 @@ p.c = b'\x9D' raw = buffer(p)[:] if sys.byteorder == 'little': - if flag == 0 or flag == 2: # gcc, little endian + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A7\xC7\x9D' - elif flag == 1: # msvc - assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\xE3\x9B\x9D' else: raise AssertionError("bad flag") else: - if flag == 0 or flag == 2: # gcc + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A\xC77\x9D' - elif flag == 1: # msvc - assert raw == 
b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\x9B\xE3\x9D' else: raise AssertionError("bad flag") @@ -2935,18 +2935,15 @@ ('', BShort, 9), ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) - if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 + elif flag & SF_GCC_X86_BITFIELDS: assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc + elif flag & SF_GCC_ARM_BITFIELDS: assert sizeof(BStruct) == 6 assert alignof(BStruct) == 2 - elif flag == 2: # gcc ARM - assert sizeof(BStruct) == 6 - assert alignof(BStruct) == 2 - elif flag == 4: # gcc, big endian - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 else: raise AssertionError("bad flag") # @@ -2955,37 +2952,43 @@ ('', BInt, 0), ('', BInt, 0), ('c', BChar, -1)], -1, -1, -1, flag) - if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 + assert alignof(BStruct) == 1 + elif flag & SF_GCC_X86_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc - assert typeoffsetof(BStruct, 'c') == (BChar, 1) - assert sizeof(BStruct) == 2 - assert alignof(BStruct) == 1 - elif flag == 2: # gcc ARM + elif flag & SF_GCC_ARM_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 8 assert alignof(BStruct) == 4 - elif flag == 4: # gcc, big endian - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 else: raise AssertionError("bad flag") -def test_bitfield_as_gcc(): - _test_bitfield_details(flag=0) +SF_MSVC_BITFIELDS = 0x01 +SF_GCC_ARM_BITFIELDS = 0x02 +SF_GCC_X86_BITFIELDS = 0x10 + +SF_GCC_BIG_ENDIAN = 0x04 +SF_GCC_LITTLE_ENDIAN = 0x40 + +SF_PACKED = 0x08 + +def test_bitfield_as_x86_gcc(): + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_msvc(): - _test_bitfield_details(flag=1) + _test_bitfield_details(flag=SF_MSVC_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_arm_gcc(): - _test_bitfield_details(flag=2) + _test_bitfield_details(flag=SF_GCC_ARM_BITFIELDS|SF_GCC_LITTLE_ENDIAN) -def test_bitfield_as_big_endian(): - _test_bitfield_details(flag=4) +def test_bitfield_as_ppc_gcc(): + # PowerPC uses the same format as X86, but is big-endian + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_BIG_ENDIAN) def test_struct_array_no_length(): @@ -3161,7 +3164,7 @@ complete_struct_or_union(BStruct, [('a1', BLong, -1), ('a2', BChar, -1), ('a3', BShort, -1)], - None, -1, -1, 8) # SF_PACKED==8 + None, -1, -1, SF_PACKED) d = BStruct.fields assert len(d) == 3 assert d[0][0] == 'a1' @@ -3190,7 +3193,7 @@ complete_struct_or_union, BStruct, [('a1', BLong, 30), ('a2', BChar, 5)], - None, -1, -1, 8) # SF_PACKED==8 + None, -1, -1, SF_PACKED) def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Wed Mar 5 09:27:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 09:27:02 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/9cdae12e006f Message-ID: <20140305082702.744911C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69704:174265c3935f Date: 2014-03-05 09:26 +0100 http://bitbucket.org/pypy/pypy/changeset/174265c3935f/ Log: Update to cffi/9cdae12e006f diff --git 
a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -114,20 +114,43 @@ # ____________________________________________________________ -SF_MSVC_BITFIELDS = 1 -SF_GCC_ARM_BITFIELDS = 2 -SF_GCC_BIG_ENDIAN = 4 -SF_PACKED = 8 + +SF_MSVC_BITFIELDS = 0x01 +SF_GCC_ARM_BITFIELDS = 0x02 +SF_GCC_X86_BITFIELDS = 0x10 + +SF_GCC_BIG_ENDIAN = 0x04 +SF_GCC_LITTLE_ENDIAN = 0x40 + +SF_PACKED = 0x08 + if sys.platform == 'win32': - DEFAULT_SFLAGS = SF_MSVC_BITFIELDS + DEFAULT_SFLAGS_PLATFORM = SF_MSVC_BITFIELDS else: if rffi_platform.getdefined('__arm__', ''): - DEFAULT_SFLAGS = SF_GCC_ARM_BITFIELDS + DEFAULT_SFLAGS_PLATFORM = SF_GCC_ARM_BITFIELDS else: - DEFAULT_SFLAGS = 0 - if sys.byteorder == 'big': - DEFAULT_SFLAGS |= SF_GCC_BIG_ENDIAN + DEFAULT_SFLAGS_PLATFORM = SF_GCC_X86_BITFIELDS + +if sys.byteorder == 'big': + DEFAULT_SFLAGS_ENDIAN = SF_GCC_BIG_ENDIAN +else: + DEFAULT_SFLAGS_ENDIAN = SF_GCC_LITTLE_ENDIAN + + +def complete_sflags(sflags): + # add one of the SF_xxx_BITFIELDS flags if none is specified + if not (sflags & (SF_MSVC_BITFIELDS | SF_GCC_ARM_BITFIELDS | + SF_GCC_X86_BITFIELDS)): + sflags |= DEFAULT_SFLAGS_PLATFORM + # add one of SF_GCC_xx_ENDIAN if none is specified + if not (sflags & (SF_GCC_BIG_ENDIAN | SF_GCC_LITTLE_ENDIAN)): + sflags |= DEFAULT_SFLAGS_ENDIAN + return sflags + +# ____________________________________________________________ + @unwrap_spec(name=str) def new_struct_type(space, name): @@ -140,8 +163,8 @@ @unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int, sflags=int) def complete_struct_or_union(space, w_ctype, w_fields, w_ignored=None, - totalsize=-1, totalalignment=-1, - sflags=DEFAULT_SFLAGS): + totalsize=-1, totalalignment=-1, sflags=0): + sflags = complete_sflags(sflags) if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) or w_ctype.size >= 0): raise OperationError(space.w_TypeError, diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2886,7 +2886,7 @@ ('b1', BInt, 9), ('b2', BUInt, 7), ('c', BChar, -1)], -1, -1, -1, flag) - if flag % 2 == 0: # gcc, any variant + if not (flag & SF_MSVC_BITFIELDS): # gcc, any variant assert typeoffsetof(BStruct, 'c') == (BChar, 3) assert sizeof(BStruct) == 4 else: # msvc @@ -2901,20 +2901,20 @@ p.c = b'\x9D' raw = buffer(p)[:] if sys.byteorder == 'little': - if flag == 0 or flag == 2: # gcc, little endian + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A7\xC7\x9D' - elif flag == 1: # msvc - assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\xE3\x9B\x9D' else: raise AssertionError("bad flag") else: - if flag == 0 or flag == 2: # gcc + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A\xC77\x9D' - elif flag == 1: # msvc - assert raw == b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\x9B\xE3\x9D' else: raise AssertionError("bad flag") @@ -2924,18 +2924,15 @@ ('', BShort, 9), ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) 
- if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 + elif flag & SF_GCC_X86_BITFIELDS: assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc + elif flag & SF_GCC_ARM_BITFIELDS: assert sizeof(BStruct) == 6 assert alignof(BStruct) == 2 - elif flag == 2: # gcc ARM - assert sizeof(BStruct) == 6 - assert alignof(BStruct) == 2 - elif flag == 4: # gcc, big endian - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 else: raise AssertionError("bad flag") # @@ -2944,37 +2941,43 @@ ('', BInt, 0), ('', BInt, 0), ('c', BChar, -1)], -1, -1, -1, flag) - if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 + assert alignof(BStruct) == 1 + elif flag & SF_GCC_X86_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc - assert typeoffsetof(BStruct, 'c') == (BChar, 1) - assert sizeof(BStruct) == 2 - assert alignof(BStruct) == 1 - elif flag == 2: # gcc ARM + elif flag & SF_GCC_ARM_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 8 assert alignof(BStruct) == 4 - elif flag == 4: # gcc, big endian - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 else: raise AssertionError("bad flag") -def test_bitfield_as_gcc(): - _test_bitfield_details(flag=0) +SF_MSVC_BITFIELDS = 0x01 +SF_GCC_ARM_BITFIELDS = 0x02 +SF_GCC_X86_BITFIELDS = 0x10 + +SF_GCC_BIG_ENDIAN = 0x04 +SF_GCC_LITTLE_ENDIAN = 0x40 + +SF_PACKED = 0x08 + +def test_bitfield_as_x86_gcc(): + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_msvc(): - _test_bitfield_details(flag=1) + _test_bitfield_details(flag=SF_MSVC_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_arm_gcc(): - _test_bitfield_details(flag=2) + _test_bitfield_details(flag=SF_GCC_ARM_BITFIELDS|SF_GCC_LITTLE_ENDIAN) -def test_bitfield_as_big_endian(): - _test_bitfield_details(flag=4) +def test_bitfield_as_ppc_gcc(): + # PowerPC uses the same format as X86, but is big-endian + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_BIG_ENDIAN) def test_struct_array_no_length(): @@ -3150,7 +3153,7 @@ complete_struct_or_union(BStruct, [('a1', BLong, -1), ('a2', BChar, -1), ('a3', BShort, -1)], - None, -1, -1, 8) # SF_PACKED==8 + None, -1, -1, SF_PACKED) d = BStruct.fields assert len(d) == 3 assert d[0][0] == 'a1' @@ -3179,7 +3182,7 @@ complete_struct_or_union, BStruct, [('a1', BLong, 30), ('a2', BChar, 5)], - None, -1, -1, 8) # SF_PACKED==8 + None, -1, -1, SF_PACKED) def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Wed Mar 5 09:34:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 09:34:10 +0100 (CET) Subject: [pypy-commit] stmgc default: Copying the hash/id logic from minimark.py, first part Message-ID: <20140305083410.6A4C01C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r955:8185ee16c279 Date: 2014-03-05 08:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/8185ee16c279/ Log: Copying the hash/id logic from minimark.py, first part diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -46,6 +46,14 @@ */ GCFLAG_SMALL_UNIFORM = 0x02, + /* The following flag is set on nursery objects of which we asked + the id or the identityhash. 
It means that a space of the size of + the object has already been allocated in the nonmovable part. + The same flag is abused to mark prebuilt objects whose hash has + been taken during translation and is statically recorded just + after the object. */ + GCFLAG_HAS_SHADOW = 0x04, + /* All remaining bits of the 32-bit 'stm_flags' field are taken by the "overflow number". This is a number that identifies the "overflow objects" from the current transaction among all old @@ -53,7 +61,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. */ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x04 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x08 /* must be last */ }; diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -27,9 +27,10 @@ return; } - /* We need to make a copy of this object. */ + /* We need to make a copy of this object. The extra "long" is for + the prebuilt hash. */ size_t size = stmcb_size_rounded_up(obj); - object_t *nobj = _stm_allocate_old(size); + object_t *nobj = _stm_allocate_old(size + sizeof(long)); /* Copy the object */ char *realnobj = REAL_ADDRESS(stm_object_pages, nobj); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -22,6 +22,7 @@ #include "stm/nursery.c" #include "stm/sync.c" #include "stm/setup.c" +#include "stm/hash_id.c" #include "stm/core.c" #include "stm/contention.c" #include "stm/fprintcolor.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -261,6 +261,14 @@ static structure, but it should never be used anyway.) */ object_t *stm_setup_prebuilt(object_t *); +/* Hash, id. The id is just the address of the object (of the address + where it *will* be after the next minor collection). The hash is the + same, mangled -- except on prebuilt objects, where it can be + controlled for each prebuilt object individually. 
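   For example (a minimal usage sketch following the tests in
   c7/test/test_hash_id.py; 'obj' stands for any object returned by
   stm_setup_prebuilt()):

       stm_set_prebuilt_identityhash(obj, 42);
       ...
       stm_identityhash(obj)   -> returns 42, the recorded hash
       stm_id(obj)             -> still returns the address of 'obj'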
(Useful uor PyPy) */ +long stm_identityhash(object_t *obj); +long stm_id(object_t *obj); +void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); + /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -74,6 +74,10 @@ void stm_collect(long level); uint64_t _stm_total_allocated(void); + +long stm_identityhash(object_t *obj); +long stm_id(object_t *obj); +void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); """) diff --git a/c7/test/test_hash_id.py b/c7/test/test_hash_id.py new file mode 100644 --- /dev/null +++ b/c7/test/test_hash_id.py @@ -0,0 +1,39 @@ +from support import * +from test_prebuilt import prebuilt +import py + +class TestHashId(BaseTest): + + def test_hash_old_object(self): + lp1 = stm_allocate_old(16) + lp2 = stm_allocate_old(16) + lp3 = stm_allocate_old(16) + lp4 = stm_allocate_old(16) + self.start_transaction() + h1 = lib.stm_identityhash(lp1) + h2 = lib.stm_identityhash(lp2) + h3 = lib.stm_identityhash(lp3) + h4 = lib.stm_identityhash(lp4) + assert len(set([h1, h2, h3, h4])) == 4 # guaranteed by the algo + + def test_id_old_object(self): + lp1 = stm_allocate_old(16) + self.start_transaction() + h1 = lib.stm_id(lp1) + assert h1 == int(ffi.cast("long", lp1)) + + def test_set_prebuilt_identityhash(self): + static1 = prebuilt(16) + static2 = prebuilt(16) + lp1 = lib.stm_setup_prebuilt(static1) + lp2 = lib.stm_setup_prebuilt(static2) + lib.stm_set_prebuilt_identityhash(lp1, 42) + self.start_transaction() + h1 = lib.stm_identityhash(lp1) + h2 = lib.stm_identityhash(lp2) + assert h1 == 42 + assert h2 != 0 + h1 = lib.stm_id(lp1) + h2 = lib.stm_id(lp2) + assert h1 == int(ffi.cast("long", lp1)) + assert h2 == int(ffi.cast("long", lp2)) From noreply at buildbot.pypy.org Wed Mar 5 09:34:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 09:34:11 +0100 (CET) Subject: [pypy-commit] stmgc default: Update TODO Message-ID: <20140305083411.AECE21C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r956:18839ad2d545 Date: 2014-03-05 09:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/18839ad2d545/ Log: Update TODO diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -1,3 +1,6 @@ + +known-working revision: 5e4ec1af0e0c + - use small uniform gcpages @@ -9,3 +12,5 @@ - the highest_overflow_number can overflow after 2**30 non-collect-time minor collections + +- re-enable the buggy RESHARE_PAGES=1, probably with a better impl From noreply at buildbot.pypy.org Wed Mar 5 09:36:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 09:36:50 +0100 (CET) Subject: [pypy-commit] stmgc default: Forgot to add in 8185ee16c279 Message-ID: <20140305083650.060BD1C0632@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r957:65ff01990d5d Date: 2014-03-05 09:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/65ff01990d5d/ Log: Forgot to add in 8185ee16c279 diff --git a/c7/stm/hash_id.c b/c7/stm/hash_id.c new file mode 100644 --- /dev/null +++ b/c7/stm/hash_id.c @@ -0,0 +1,69 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static long mangle_hash(long i) +{ + /* To hash pointers in dictionaries. Assumes that i shows some + alignment (to 8, 16, maybe 32 bytes), so we use the following + formula to avoid the trailing bits being always 0. 
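       A quick worked example, assuming consecutive 32-byte allocations
       (the case exercised by test_hash_lower_bits in
       c7/test/test_hash_id.py): for addresses a, a+32, a+64, a+96,
       (i >> 5) & 0xf takes four distinct values while i & 0xf stays
       the same, so the results i ^ (i >> 5) already differ in their
       low 4 bits.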
*/ + return i ^ (i >> 5); +} + +static long id_or_identityhash(object_t *obj, bool is_hash) +{ + long result; + + if (obj != NULL) { + if (_is_in_nursery(obj)) { + abort();//obj = find_shadow(obj); + } + else if (is_hash) { + if (obj->stm_flags & GCFLAG_HAS_SHADOW) { + + /* For identityhash(), we need a special case for some + prebuilt objects: their hash must be the same before + and after translation. It is stored as an extra word + after the object. But we cannot use it for id() + because the stored value might clash with a real one. + */ + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + result = *(long *)(((char *)realobj) + size); + /* Important: the returned value is not mangle_hash()ed! */ + return result; + } + } + } + + result = (long)(uintptr_t)obj; + if (is_hash) { + result = mangle_hash(result); + } + return result; +} + +long stm_id(object_t *obj) +{ + return id_or_identityhash(obj, false); +} + +long stm_identityhash(object_t *obj) +{ + return id_or_identityhash(obj, true); +} + +void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash) +{ + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(stm_object_pages, obj); + + assert(realobj->stm_flags == GCFLAG_WRITE_BARRIER); + realobj->stm_flags |= GCFLAG_HAS_SHADOW; + + size_t size = stmcb_size_rounded_up(realobj); + assert(*(long *)(((char *)realobj) + size) == 0); + *(long *)(((char *)realobj) + size) = hash; +} From noreply at buildbot.pypy.org Wed Mar 5 09:44:33 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 5 Mar 2014 09:44:33 +0100 (CET) Subject: [pypy-commit] pypy default: make a strange error go away (don't call it with None though) Message-ID: <20140305084433.D65361C0632@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69705:5a8c87bf1104 Date: 2014-03-05 10:43 +0200 http://bitbucket.org/pypy/pypy/changeset/5a8c87bf1104/ Log: make a strange error go away (don't call it with None though) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -137,6 +137,7 @@ # with other wrappers that directly handle unicode strings. @specialize.argtype(0) def open(path, flags, mode): + assert path is not None if isinstance(path, str): return os.open(path, flags, mode) else: From noreply at buildbot.pypy.org Wed Mar 5 10:10:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 10:10:44 +0100 (CET) Subject: [pypy-commit] stmgc default: Finish copying the shadow logic from pypy. Message-ID: <20140305091044.4DCBB1D26FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r958:cb55e6140145 Date: 2014-03-05 10:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/cb55e6140145/ Log: Finish copying the shadow logic from pypy. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -36,7 +36,7 @@ by finding that we already own the write-lock. 
*/ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; uint8_t lock_num = STM_PSEGMENT->write_lock_num; - assert((intptr_t)lock_idx >= 0); + assert(lock_idx < sizeof(write_locks)); retry: if (write_locks[lock_idx] == 0) { if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], @@ -179,6 +179,7 @@ assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); + assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); @@ -300,7 +301,7 @@ /* clear the write-lock (note that this runs with all other threads paused, so no need to be careful about ordering) */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; - assert((intptr_t)lock_idx >= 0); + assert(lock_idx < sizeof(write_locks)); assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); write_locks[lock_idx] = 0; @@ -371,7 +372,8 @@ /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; - assert(highest_overflow_number != 0); /* XXX else, overflow! */ + assert(highest_overflow_number != /* XXX else, overflow! */ + (uint32_t)-GCFLAG_OVERFLOW_NUMBER_bit0); STM_PSEGMENT->overflow_number = highest_overflow_number; STM_PSEGMENT->overflow_number_has_been_used = false; } @@ -440,7 +442,7 @@ /* clear the write-lock */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; - assert((intptr_t)lock_idx >= 0); + assert(lock_idx < sizeof(write_locks)); assert(write_locks[lock_idx] == pseg->write_lock_num); write_locks[lock_idx] = 0; })); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -95,11 +95,16 @@ current transaction spanned a minor collection. */ struct list_s *large_overflow_objects; - /* List of all young objects outside the nursery ("young" in the + /* Set of all young objects outside the nursery ("young" in the sense that they should be in the nursery, but were too big for that). */ struct tree_s *young_outside_nursery; + /* Support for id and identityhash: this is a dict mapping nursery + objects with GCFLAG_HAS_SHADOW to their future location at the + next minor collection. */ + struct tree_s *nursery_objects_shadows; + /* Start time: to know approximately for how long a transaction has been running, in contention management */ uint64_t start_time; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -151,7 +151,6 @@ static inline uintptr_t mark_loc(object_t *obj) { uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; - assert(lock_idx >= 0); assert(lock_idx < sizeof(write_locks)); return lock_idx; } diff --git a/c7/stm/hash_id.c b/c7/stm/hash_id.c --- a/c7/stm/hash_id.c +++ b/c7/stm/hash_id.c @@ -17,7 +17,7 @@ if (obj != NULL) { if (_is_in_nursery(obj)) { - abort();//obj = find_shadow(obj); + obj = find_shadow(obj); } else if (is_hash) { if (obj->stm_flags & GCFLAG_HAS_SHADOW) { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -53,10 +53,12 @@ return _is_in_nursery(obj); } +static object_t *find_existing_shadow(object_t *obj); + /************************************************************/ -#define GCWORD_MOVED ((object_t *) -42) +#define GCWORD_MOVED ((object_t *) -1) #define FLAG_SYNC_LARGE 0x01 @@ -76,18 +78,33 @@ to GCWORD_MOVED. In that case, the forwarding location, i.e. 
where the object moved to, is stored in the second word in 'obj'. */ object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + char *realobj; + size_t size; - if (pforwarded_array[0] == GCWORD_MOVED) { - *pobj = pforwarded_array[1]; /* already moved */ - return; + if (obj->stm_flags & GCFLAG_HAS_SHADOW) { + /* ^^ the single check above detects both already-moved objects + and objects with HAS_SHADOW. This is because GCWORD_MOVED + overrides completely the stm_flags field with 1's bits. */ + + if (LIKELY(pforwarded_array[0] == GCWORD_MOVED)) { + *pobj = pforwarded_array[1]; /* already moved */ + return; + } + else { + /* really has a shadow */ + nobj = find_existing_shadow(obj); + obj->stm_flags &= ~GCFLAG_HAS_SHADOW; + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size = stmcb_size_rounded_up((struct object_s *)realobj); + goto copy_large_object; + } } - /* We need to make a copy of this object. It goes either in a largemalloc.c-managed area, or if it's small enough, in one of the small uniform pages from gcpage.c. */ - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - size_t size = stmcb_size_rounded_up((struct object_s *)realobj); + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size = stmcb_size_rounded_up((struct object_s *)realobj); if (1 /*size >= GC_N_SMALL_REQUESTS*8*/) { @@ -97,6 +114,7 @@ nobj = (object_t *)(allocated - stm_object_pages); /* Copy the object */ + copy_large_object:; char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); memcpy(realnobj, realobj, size); @@ -229,6 +247,8 @@ tree_clear(pseg->young_outside_nursery); } + + tree_clear(pseg->nursery_objects_shadows); } #define MINOR_NOTHING_TO_DO(pseg) \ @@ -340,7 +360,7 @@ char *result = allocate_outside_nursery_large(size_rounded_up); object_t *o = (object_t *)(result - stm_object_pages); - tree_insert(STM_PSEGMENT->young_outside_nursery, (intptr_t)o, 0); + tree_insert(STM_PSEGMENT->young_outside_nursery, (uintptr_t)o, 0); memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); return o; @@ -413,3 +433,60 @@ set_gs_register(get_segment_base(original_num)); } + +static object_t *allocate_shadow(object_t *obj) +{ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up((struct object_s *)realobj); + + /* always gets outside as a large object for now */ + char *allocated = allocate_outside_nursery_large(size); + object_t *nobj = (object_t *)(allocated - stm_object_pages); + + /* Initialize the shadow enough to be considered a valid gc object. + If the original object stays alive at the next minor collection, + it will anyway be copied over the shadow and overwrite the + following fields. But if the object dies, then the shadow will + stay around and only be freed at the next major collection, at + which point we want it to look valid (but ready to be freed). + + Here, in the general case, it requires copying the whole object. + It could be more optimized in special cases like in PyPy, by + copying only the typeid and (for var-sized objects) the length + field. It's probably overkill to add a special stmcb_xxx + interface just for that. 
+ */ + char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); + memcpy(realnobj, realobj, size); + + obj->stm_flags |= GCFLAG_HAS_SHADOW; + tree_insert(STM_PSEGMENT->nursery_objects_shadows, + (uintptr_t)obj, (uintptr_t)nobj); + return nobj; +} + +static object_t *find_existing_shadow(object_t *obj) +{ + wlog_t *item; + + TREE_FIND(*STM_PSEGMENT->nursery_objects_shadows, + (uintptr_t)obj, item, goto not_found); + + /* The answer is the address of the shadow. */ + return (object_t *)item->val; + + not_found: + stm_fatalerror("GCFLAG_HAS_SHADOW but no shadow found"); +} + +static object_t *find_shadow(object_t *obj) +{ + /* The object 'obj' is still in the nursery. Find or allocate a + "shadow" object, which is where the object will be moved by the + next minor collection + */ + if (obj->stm_flags & GCFLAG_HAS_SHADOW) + return find_existing_shadow(obj); + else + return allocate_shadow(obj); +} diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -19,3 +19,5 @@ } static void assert_memset_zero(void *s, size_t n); + +static object_t *find_shadow(object_t *obj); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -58,6 +58,7 @@ pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); pr->young_outside_nursery = tree_create(); + pr->nursery_objects_shadows = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); highest_overflow_number = pr->overflow_number; } @@ -94,6 +95,7 @@ assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); tree_free(pr->young_outside_nursery); + tree_free(pr->nursery_objects_shadows); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/c7/test/test_hash_id.py b/c7/test/test_hash_id.py --- a/c7/test/test_hash_id.py +++ b/c7/test/test_hash_id.py @@ -37,3 +37,38 @@ h2 = lib.stm_id(lp2) assert h1 == int(ffi.cast("long", lp1)) assert h2 == int(ffi.cast("long", lp2)) + + def test_hash_nursery(self): + self.start_transaction() + lp1 = stm_allocate(16) + lp2 = stm_allocate(16) + lp3 = stm_allocate(16) + lp4 = stm_allocate(16) + h1 = lib.stm_identityhash(lp1) + h2 = lib.stm_identityhash(lp2) + h3 = lib.stm_identityhash(lp3) + h4 = lib.stm_identityhash(lp4) + assert len(set([h1, h2, h3, h4])) == 4 # guaranteed by the algo + + def test_hash_lower_bits(self): + self.start_transaction() + lp1 = stm_allocate(32) + lp2 = stm_allocate(32) + lp3 = stm_allocate(32) + lp4 = stm_allocate(32) + h1 = lib.stm_identityhash(lp1) + h2 = lib.stm_identityhash(lp2) + h3 = lib.stm_identityhash(lp3) + h4 = lib.stm_identityhash(lp4) + assert len(set([h1 & 15, h2 & 15, h3 & 15, h4 & 15])) == 4 + + def test_hash_around_minor_collect(self): + self.start_transaction() + lp = stm_allocate(16) + h1 = lib.stm_identityhash(lp) + self.push_root(lp) + stm_minor_collect() + lp = self.pop_root() + h2 = lib.stm_identityhash(lp) + assert h2 == h1 + assert h2 != lib.stm_id(lp) From noreply at buildbot.pypy.org Wed Mar 5 10:12:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 10:12:18 +0100 (CET) Subject: [pypy-commit] stmgc default: done Message-ID: <20140305091218.054891D2736@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r959:202ea90b3c60 Date: 2014-03-05 10:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/202ea90b3c60/ Log: done diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -6,7 +6,6 @@ - write barrier for big arrays -- hash, id - weakrefs - finalizers From noreply at buildbot.pypy.org Wed Mar 
5 10:17:23 2014 From: noreply at buildbot.pypy.org (krono) Date: Wed, 5 Mar 2014 10:17:23 +0100 (CET) Subject: [pypy-commit] pypy popen-pclose: Provide an exit status for popen'ed RFiles via pclose Message-ID: <20140305091723.5B8D01D282A@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: popen-pclose Changeset: r69706:58405322f403 Date: 2014-03-03 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/58405322f403/ Log: Provide an exit status for popen'ed RFiles via pclose diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -116,7 +116,13 @@ rffi.free_nonmovingbuffer(value, ll_value) def close(self): + """ Closes the described file. + +Attention! Unlike Python semantics, `close' does not return `None' upon +success but `0', to be able to return an exit code for popen'ed files """ + ll_f = self.ll_file + res = 0 if ll_f: # double close is allowed self.ll_file = lltype.nullptr(FILE) @@ -124,6 +130,7 @@ if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) + return os.WEXITSTATUS(res) _do_close = staticmethod(c_close) # overridden in RPopenFile diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -196,3 +196,14 @@ s = f.read() f.close() assert s == '42\n' + + def test_pclose(self): + retval = 32 + printval = 42 + cmd = "python -c 'import sys; print %s; sys.exit(%s)'" % ( + printval, retval) + f = rfile.create_popen_file(cmd, "r") + s = f.read() + r = f.close() + assert s == "%s\n" % printval + assert r == retval From noreply at buildbot.pypy.org Wed Mar 5 10:17:24 2014 From: noreply at buildbot.pypy.org (krono) Date: Wed, 5 Mar 2014 10:17:24 +0100 (CET) Subject: [pypy-commit] pypy popen-pclose: Add interpret-tests Message-ID: <20140305091724.DC8511D282A@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: popen-pclose Changeset: r69707:cf48985f2c5b Date: 2014-03-03 20:00 +0100 http://bitbucket.org/pypy/pypy/changeset/cf48985f2c5b/ Log: Add interpret-tests Also: adapt docstring as per fijal's request diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -116,11 +116,11 @@ rffi.free_nonmovingbuffer(value, ll_value) def close(self): - """ Closes the described file. + """Closes the described file. -Attention! Unlike Python semantics, `close' does not return `None' upon -success but `0', to be able to return an exit code for popen'ed files """ - + Attention! 
Unlike Python semantics, `close' does not return `None' upon + success but `0', to be able to return an exit code for popen'ed files + """ ll_f = self.ll_file res = 0 if ll_f: diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -186,7 +186,7 @@ f.close() -class TestPopen: +class TestPopen(object): def setup_class(cls): if sys.platform == 'win32': py.test.skip("not for win32") @@ -207,3 +207,24 @@ r = f.close() assert s == "%s\n" % printval assert r == retval + +class TestPopenR(BaseRtypingTest): + def setup_class(cls): + if sys.platform == 'win32': + py.test.skip("not for win32") + + def test_popen(self): + def f(): + f = rfile.create_popen_file("python -c 'print 42'", "r") + s = f.read() + f.close() + self.interpret(f, []) + + def test_pclose(self): + retval = 32 + cmd = "python -c 'import sys; print 45; sys.exit(%s)'" % retval + def f(): + f = rfile.create_popen_file(cmd, "r") + s = f.read() + return f.close() + r = self.interpret(f, []) From noreply at buildbot.pypy.org Wed Mar 5 10:17:26 2014 From: noreply at buildbot.pypy.org (krono) Date: Wed, 5 Mar 2014 10:17:26 +0100 (CET) Subject: [pypy-commit] pypy popen-pclose: more asserts for rfile test Message-ID: <20140305091726.3A18D1D282C@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: popen-pclose Changeset: r69708:7cccc9080c5b Date: 2014-03-04 11:33 +0100 http://bitbucket.org/pypy/pypy/changeset/7cccc9080c5b/ Log: more asserts for rfile test diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -214,17 +214,24 @@ py.test.skip("not for win32") def test_popen(self): + printval = 42 + cmd = "python -c 'print %s'" % printval def f(): - f = rfile.create_popen_file("python -c 'print 42'", "r") + f = rfile.create_popen_file(cmd, "r") s = f.read() f.close() + assert s == "%s\n" % printval self.interpret(f, []) def test_pclose(self): + printval = 42 retval = 32 - cmd = "python -c 'import sys; print 45; sys.exit(%s)'" % retval + cmd = "python -c 'import sys; print %s; sys.exit(%s)'" % ( + printval, retval) def f(): f = rfile.create_popen_file(cmd, "r") s = f.read() + assert s == "%s\n" % printval return f.close() r = self.interpret(f, []) + assert r == retval From noreply at buildbot.pypy.org Wed Mar 5 10:17:27 2014 From: noreply at buildbot.pypy.org (krono) Date: Wed, 5 Mar 2014 10:17:27 +0100 (CET) Subject: [pypy-commit] pypy popen-pclose: Exit status masking is not done by close but by its caller Message-ID: <20140305091727.934711D282C@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: popen-pclose Changeset: r69709:ba703e79044d Date: 2014-03-04 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/ba703e79044d/ Log: Exit status masking is not done by close but by its caller diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -119,7 +119,9 @@ """Closes the described file. Attention! Unlike Python semantics, `close' does not return `None' upon - success but `0', to be able to return an exit code for popen'ed files + success but `0', to be able to return an exit code for popen'ed files. + + The actual return value may be determined with os.WEXITSTATUS. 
""" ll_f = self.ll_file res = 0 @@ -130,7 +132,7 @@ if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) - return os.WEXITSTATUS(res) + return res _do_close = staticmethod(c_close) # overridden in RPopenFile diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -206,7 +206,7 @@ s = f.read() r = f.close() assert s == "%s\n" % printval - assert r == retval + assert os.WEXITSTATUS(r) == retval class TestPopenR(BaseRtypingTest): def setup_class(cls): @@ -234,4 +234,4 @@ assert s == "%s\n" % printval return f.close() r = self.interpret(f, []) - assert r == retval + assert os.WEXITSTATUS(r) == retval From noreply at buildbot.pypy.org Wed Mar 5 10:17:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 10:17:28 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in krono/pypy/popen-pclose (pull request #206) Message-ID: <20140305091728.E08D51D282D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69710:7532271a94d7 Date: 2014-03-05 10:16 +0100 http://bitbucket.org/pypy/pypy/changeset/7532271a94d7/ Log: Merged in krono/pypy/popen-pclose (pull request #206) Provide an exit status for popen'ed RFiles via pclose diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -125,7 +125,15 @@ rffi.free_nonmovingbuffer(value, ll_value) def close(self): + """Closes the described file. + + Attention! Unlike Python semantics, `close' does not return `None' upon + success but `0', to be able to return an exit code for popen'ed files. + + The actual return value may be determined with os.WEXITSTATUS. + """ ll_f = self.ll_file + res = 0 if ll_f: # double close is allowed self.ll_file = lltype.nullptr(FILE) @@ -133,6 +141,7 @@ if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) + return res _do_close = staticmethod(c_close) # overridden in RPopenFile diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -186,7 +186,7 @@ f.close() -class TestPopen: +class TestPopen(object): def setup_class(cls): if sys.platform == 'win32': py.test.skip("not for win32") @@ -196,3 +196,42 @@ s = f.read() f.close() assert s == '42\n' + + def test_pclose(self): + retval = 32 + printval = 42 + cmd = "python -c 'import sys; print %s; sys.exit(%s)'" % ( + printval, retval) + f = rfile.create_popen_file(cmd, "r") + s = f.read() + r = f.close() + assert s == "%s\n" % printval + assert os.WEXITSTATUS(r) == retval + +class TestPopenR(BaseRtypingTest): + def setup_class(cls): + if sys.platform == 'win32': + py.test.skip("not for win32") + + def test_popen(self): + printval = 42 + cmd = "python -c 'print %s'" % printval + def f(): + f = rfile.create_popen_file(cmd, "r") + s = f.read() + f.close() + assert s == "%s\n" % printval + self.interpret(f, []) + + def test_pclose(self): + printval = 42 + retval = 32 + cmd = "python -c 'import sys; print %s; sys.exit(%s)'" % ( + printval, retval) + def f(): + f = rfile.create_popen_file(cmd, "r") + s = f.read() + assert s == "%s\n" % printval + return f.close() + r = self.interpret(f, []) + assert os.WEXITSTATUS(r) == retval From noreply at buildbot.pypy.org Wed Mar 5 10:35:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 10:35:02 +0100 (CET) Subject: [pypy-commit] pypy default: Duplicate the tests to also run in a 
plain ascii mode. Useful because I Message-ID: <20140305093502.C66841D2859@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69712:1273570cb7ad Date: 2014-03-05 10:33 +0100 http://bitbucket.org/pypy/pypy/changeset/1273570cb7ad/ Log: Duplicate the tests to also run in a plain ascii mode. Useful because I can't find a machine which doesn't just skip all these tests with "encoding not good enough". diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -25,10 +25,9 @@ def as_unicode(self): return self.unistr -class TestPosixUnicode: +class BasePosixUnicode: def setup_method(self, method): - self.ufilename = (unicode(udir.join('test_open')) + - u'\u65e5\u672c.txt') # "Japan" + self.ufilename = self._get_filename() try: f = file(self.ufilename, 'w') except UnicodeEncodeError: @@ -148,3 +147,13 @@ rposix.unsetenv(self.path) interpret(f, []) # does not crash + + +class TestPosixAscii(BasePosixUnicode): + def _get_filename(self): + return str(udir.join('test_open_ascii')) + +class TestPosixUnicode(BasePosixUnicode): + def _get_filename(self): + return (unicode(udir.join('test_open')) + + u'\u65e5\u672c.txt') # "Japan" From noreply at buildbot.pypy.org Wed Mar 5 10:38:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 10:38:17 +0100 (CET) Subject: [pypy-commit] pypy default: Adding the missing "assert path is not None" to *all* *other* Message-ID: <20140305093817.537FB1D285A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69713:4c5d9fd4c40d Date: 2014-03-05 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/4c5d9fd4c40d/ Log: Adding the missing "assert path is not None" to *all* *other* *functions* would be too much of a pain. Instead, refactor. diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -136,141 +136,92 @@ # - but rpython.rtyper.module.ll_os.py on Windows will replace these functions # with other wrappers that directly handle unicode strings. 
@specialize.argtype(0) -def open(path, flags, mode): +def _as_bytes(path): assert path is not None if isinstance(path, str): - return os.open(path, flags, mode) + return path else: - return os.open(path.as_bytes(), flags, mode) + return path.as_bytes() + + at specialize.argtype(0) +def open(path, flags, mode): + return os.open(_as_bytes(path), flags, mode) @specialize.argtype(0) def stat(path): - if isinstance(path, str): - return os.stat(path) - else: - return os.stat(path.as_bytes()) + return os.stat(_as_bytes(path)) @specialize.argtype(0) def lstat(path): - if isinstance(path, str): - return os.lstat(path) - else: - return os.lstat(path.as_bytes()) + return os.lstat(_as_bytes(path)) @specialize.argtype(0) def statvfs(path): - if isinstance(path, str): - return os.statvfs(path) - else: - return os.statvfs(path.as_bytes()) + return os.statvfs(_as_bytes(path)) @specialize.argtype(0) def unlink(path): - if isinstance(path, str): - return os.unlink(path) - else: - return os.unlink(path.as_bytes()) + return os.unlink(_as_bytes(path)) @specialize.argtype(0, 1) def rename(path1, path2): - if isinstance(path1, str): - return os.rename(path1, path2) - else: - return os.rename(path1.as_bytes(), path2.as_bytes()) + return os.rename(_as_bytes(path1), _as_bytes(path2)) @specialize.argtype(0) def listdir(dirname): - if isinstance(dirname, str): - return os.listdir(dirname) - else: - return os.listdir(dirname.as_bytes()) + return os.listdir(_as_bytes(dirname)) @specialize.argtype(0) def access(path, mode): - if isinstance(path, str): - return os.access(path, mode) - else: - return os.access(path.as_bytes(), mode) + return os.access(_as_bytes(path), mode) @specialize.argtype(0) def chmod(path, mode): - if isinstance(path, str): - return os.chmod(path, mode) - else: - return os.chmod(path.as_bytes(), mode) + return os.chmod(_as_bytes(path), mode) @specialize.argtype(0, 1) def utime(path, times): - if isinstance(path, str): - return os.utime(path, times) - else: - return os.utime(path.as_bytes(), times) + return os.utime(_as_bytes(path), times) @specialize.argtype(0) def chdir(path): - if isinstance(path, str): - return os.chdir(path) - else: - return os.chdir(path.as_bytes()) + return os.chdir(_as_bytes(path)) @specialize.argtype(0) def mkdir(path, mode=0777): - if isinstance(path, str): - return os.mkdir(path, mode) - else: - return os.mkdir(path.as_bytes(), mode) + return os.mkdir(_as_bytes(path), mode) @specialize.argtype(0) def rmdir(path): - if isinstance(path, str): - return os.rmdir(path) - else: - return os.rmdir(path.as_bytes()) + return os.rmdir(_as_bytes(path)) @specialize.argtype(0) def mkfifo(path, mode): - if isinstance(path, str): - os.mkfifo(path, mode) - else: - os.mkfifo(path.as_bytes(), mode) + os.mkfifo(_as_bytes(path), mode) @specialize.argtype(0) def mknod(path, mode, device): - if isinstance(path, str): - os.mknod(path, mode, device) - else: - os.mknod(path.as_bytes(), mode, device) + os.mknod(_as_bytes(path), mode, device) @specialize.argtype(0, 1) def symlink(src, dest): - if isinstance(src, str): - os.symlink(src, dest) - else: - os.symlink(src.as_bytes(), dest.as_bytes()) + os.symlink(_as_bytes(src), _as_bytes(dest)) if os.name == 'nt': import nt + @specialize.argtype(0) def _getfullpathname(path): - if isinstance(path, str): - return nt._getfullpathname(path) - else: - return nt._getfullpathname(path.as_bytes()) + return nt._getfullpathname(_as_bytes(path)) @specialize.argtype(0, 1) def putenv(name, value): - if isinstance(name, str): - os.environ[name] = value - else: - 
os.environ[name.as_bytes()] = value.as_bytes() + os.environ[_as_bytes(name)] = _as_bytes(value) @specialize.argtype(0) def unsetenv(name): - if isinstance(name, str): - del os.environ[name] - else: - del os.environ[name.as_bytes()] + del os.environ[_as_bytes(name)] if os.name == 'nt': from rpython.rlib import rwin32 From noreply at buildbot.pypy.org Wed Mar 5 10:44:12 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 5 Mar 2014 10:44:12 +0100 (CET) Subject: [pypy-commit] pypy default: display nditer missing functionality Message-ID: <20140305094412.9DD631C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69714:d9f8f5cae50e Date: 2014-03-05 11:39 +0200 http://bitbucket.org/pypy/pypy/changeset/d9f8f5cae50e/ Log: display nditer missing functionality diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -39,7 +39,7 @@ return len(self._items) class Item(object): - def __init__(self, name, kind, subitems=None): + def __init__(self, name, kind, subitems=[]): self.name = name self.kind = kind self.subitems = subitems @@ -72,7 +72,7 @@ items = SearchableSet() for line in lines: kind, name = line.split(" : ", 1) - subitems = None + subitems = [] if kind == KINDS["TYPE"] and name in SPECIAL_NAMES and attr is None: subitems = find_numpy_items(python, modname, name) items.add(Item(name, kind, subitems)) @@ -93,7 +93,8 @@ l[i].append(lst[k * lgt + i]) return l -SPECIAL_NAMES = ["ndarray", "dtype", "generic", "flatiter", "ufunc"] +SPECIAL_NAMES = ["ndarray", "dtype", "generic", "flatiter", "ufunc", + "nditer"] def main(argv): cpy_items = find_numpy_items("/usr/bin/python") diff --git a/pypy/module/micronumpy/tool/numready/search.py b/pypy/module/micronumpy/tool/numready/search.py --- a/pypy/module/micronumpy/tool/numready/search.py +++ b/pypy/module/micronumpy/tool/numready/search.py @@ -30,4 +30,4 @@ print kind, ":", name if __name__ == "__main__": - main(sys.argv) \ No newline at end of file + main(sys.argv) From noreply at buildbot.pypy.org Wed Mar 5 10:44:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 5 Mar 2014 10:44:13 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140305094413.E28071C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69715:3fc0ff96d0bc Date: 2014-03-05 11:41 +0200 http://bitbucket.org/pypy/pypy/changeset/3fc0ff96d0bc/ Log: merge heads diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -136,141 +136,92 @@ # - but rpython.rtyper.module.ll_os.py on Windows will replace these functions # with other wrappers that directly handle unicode strings. 
@specialize.argtype(0) -def open(path, flags, mode): +def _as_bytes(path): assert path is not None if isinstance(path, str): - return os.open(path, flags, mode) + return path else: - return os.open(path.as_bytes(), flags, mode) + return path.as_bytes() + + at specialize.argtype(0) +def open(path, flags, mode): + return os.open(_as_bytes(path), flags, mode) @specialize.argtype(0) def stat(path): - if isinstance(path, str): - return os.stat(path) - else: - return os.stat(path.as_bytes()) + return os.stat(_as_bytes(path)) @specialize.argtype(0) def lstat(path): - if isinstance(path, str): - return os.lstat(path) - else: - return os.lstat(path.as_bytes()) + return os.lstat(_as_bytes(path)) @specialize.argtype(0) def statvfs(path): - if isinstance(path, str): - return os.statvfs(path) - else: - return os.statvfs(path.as_bytes()) + return os.statvfs(_as_bytes(path)) @specialize.argtype(0) def unlink(path): - if isinstance(path, str): - return os.unlink(path) - else: - return os.unlink(path.as_bytes()) + return os.unlink(_as_bytes(path)) @specialize.argtype(0, 1) def rename(path1, path2): - if isinstance(path1, str): - return os.rename(path1, path2) - else: - return os.rename(path1.as_bytes(), path2.as_bytes()) + return os.rename(_as_bytes(path1), _as_bytes(path2)) @specialize.argtype(0) def listdir(dirname): - if isinstance(dirname, str): - return os.listdir(dirname) - else: - return os.listdir(dirname.as_bytes()) + return os.listdir(_as_bytes(dirname)) @specialize.argtype(0) def access(path, mode): - if isinstance(path, str): - return os.access(path, mode) - else: - return os.access(path.as_bytes(), mode) + return os.access(_as_bytes(path), mode) @specialize.argtype(0) def chmod(path, mode): - if isinstance(path, str): - return os.chmod(path, mode) - else: - return os.chmod(path.as_bytes(), mode) + return os.chmod(_as_bytes(path), mode) @specialize.argtype(0, 1) def utime(path, times): - if isinstance(path, str): - return os.utime(path, times) - else: - return os.utime(path.as_bytes(), times) + return os.utime(_as_bytes(path), times) @specialize.argtype(0) def chdir(path): - if isinstance(path, str): - return os.chdir(path) - else: - return os.chdir(path.as_bytes()) + return os.chdir(_as_bytes(path)) @specialize.argtype(0) def mkdir(path, mode=0777): - if isinstance(path, str): - return os.mkdir(path, mode) - else: - return os.mkdir(path.as_bytes(), mode) + return os.mkdir(_as_bytes(path), mode) @specialize.argtype(0) def rmdir(path): - if isinstance(path, str): - return os.rmdir(path) - else: - return os.rmdir(path.as_bytes()) + return os.rmdir(_as_bytes(path)) @specialize.argtype(0) def mkfifo(path, mode): - if isinstance(path, str): - os.mkfifo(path, mode) - else: - os.mkfifo(path.as_bytes(), mode) + os.mkfifo(_as_bytes(path), mode) @specialize.argtype(0) def mknod(path, mode, device): - if isinstance(path, str): - os.mknod(path, mode, device) - else: - os.mknod(path.as_bytes(), mode, device) + os.mknod(_as_bytes(path), mode, device) @specialize.argtype(0, 1) def symlink(src, dest): - if isinstance(src, str): - os.symlink(src, dest) - else: - os.symlink(src.as_bytes(), dest.as_bytes()) + os.symlink(_as_bytes(src), _as_bytes(dest)) if os.name == 'nt': import nt + @specialize.argtype(0) def _getfullpathname(path): - if isinstance(path, str): - return nt._getfullpathname(path) - else: - return nt._getfullpathname(path.as_bytes()) + return nt._getfullpathname(_as_bytes(path)) @specialize.argtype(0, 1) def putenv(name, value): - if isinstance(name, str): - os.environ[name] = value - else: - 
os.environ[name.as_bytes()] = value.as_bytes() + os.environ[_as_bytes(name)] = _as_bytes(value) @specialize.argtype(0) def unsetenv(name): - if isinstance(name, str): - del os.environ[name] - else: - del os.environ[name.as_bytes()] + del os.environ[_as_bytes(name)] if os.name == 'nt': from rpython.rlib import rwin32 diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -25,10 +25,9 @@ def as_unicode(self): return self.unistr -class TestPosixUnicode: +class BasePosixUnicode: def setup_method(self, method): - self.ufilename = (unicode(udir.join('test_open')) + - u'\u65e5\u672c.txt') # "Japan" + self.ufilename = self._get_filename() try: f = file(self.ufilename, 'w') except UnicodeEncodeError: @@ -148,3 +147,13 @@ rposix.unsetenv(self.path) interpret(f, []) # does not crash + + +class TestPosixAscii(BasePosixUnicode): + def _get_filename(self): + return str(udir.join('test_open_ascii')) + +class TestPosixUnicode(BasePosixUnicode): + def _get_filename(self): + return (unicode(udir.join('test_open')) + + u'\u65e5\u672c.txt') # "Japan" From noreply at buildbot.pypy.org Wed Mar 5 10:54:19 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Mar 2014 10:54:19 +0100 (CET) Subject: [pypy-commit] stmgc default: test hash/id in demo_random Message-ID: <20140305095419.581371C02C1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r960:2c5fadc1a485 Date: 2014-03-05 10:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/2c5fadc1a485/ Log: test hash/id in demo_random diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -24,6 +24,8 @@ struct object_s hdr; int sig; long my_size; + long my_id; + long my_hash; nodeptr_t next; }; @@ -202,7 +204,7 @@ objptr_t simple_events(objptr_t p, objptr_t _r) { - int k = get_rand(8); + int k = get_rand(10); int num; switch (k) { @@ -230,6 +232,8 @@ p = stm_allocate(size); ((nodeptr_t)p)->sig = SIGNATURE; ((nodeptr_t)p)->my_size = size; + ((nodeptr_t)p)->my_id = -1; + ((nodeptr_t)p)->my_hash = -1; pop_roots(); /* reload_roots not necessary, all are old after start_transaction */ break; @@ -249,6 +253,32 @@ write_barrier(_r); set_next(_r, p); break; + case 8: // id checking + if (p) { + nodeptr_t n = (nodeptr_t)p; + if (n->my_id == -1) { + write_barrier(p); + n->my_id = stm_id(p); + } + else { + read_barrier(p); + assert(n->my_id == stm_id(p)); + } + } + break; + case 9: + if (p) { + nodeptr_t n = (nodeptr_t)p; + if (n->my_hash == -1) { + write_barrier(p); + n->my_hash = stm_identityhash(p); + } + else { + read_barrier(p); + assert(n->my_hash == stm_identityhash(p)); + } + } + break; } return p; } @@ -359,6 +389,8 @@ shared_roots[i] = stm_allocate(sizeof(struct node_s)); ((nodeptr_t)shared_roots[i])->sig = SIGNATURE; ((nodeptr_t)shared_roots[i])->my_size = sizeof(struct node_s); + ((nodeptr_t)shared_roots[i])->my_id = -1; + ((nodeptr_t)shared_roots[i])->my_hash = -1; STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); } stm_commit_transaction(); From noreply at buildbot.pypy.org Wed Mar 5 11:20:44 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Mar 2014 11:20:44 +0100 (CET) Subject: [pypy-commit] stmgc default: use real prebuilts in demo_random Message-ID: <20140305102044.9C08B1C3398@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r961:fc0b9331bca1 Date: 2014-03-05 11:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/fc0b9331bca1/ Log: use 
real prebuilts in demo_random diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -9,11 +9,10 @@ #define NUMTHREADS 3 #define STEPS_PER_THREAD 500 -#define THREAD_STARTS 100 // how many restarts of threads -#define SHARED_ROOTS 3 +#define THREAD_STARTS 1000 // how many restarts of threads +#define PREBUILT_ROOTS 3 #define MAXROOTS 1000 - // SUPPORT struct node_s; typedef TLPREFIX struct node_s node_t; @@ -37,7 +36,7 @@ // global and per-thread-data time_t default_seed; -objptr_t shared_roots[SHARED_ROOTS]; +objptr_t prebuilt_roots[PREBUILT_ROOTS]; struct thread_data { unsigned int thread_seed; @@ -76,22 +75,6 @@ assert(n->next == *last_next); } -void _push_shared_roots() -{ - int i; - for (i = 0; i < SHARED_ROOTS; i++) { - STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); - } -} - -void _pop_shared_roots() -{ - int i; - for (i = 0; i < SHARED_ROOTS; i++) { - STM_POP_ROOT(stm_thread_local, shared_roots[SHARED_ROOTS - i - 1]); - } -} - int get_rand(int max) { if (max == 0) @@ -107,8 +90,8 @@ return td.roots[num]; } else { - num = get_rand(SHARED_ROOTS); - return shared_roots[num]; + num = get_rand(PREBUILT_ROOTS); + return prebuilt_roots[num]; } } @@ -325,9 +308,6 @@ int status; stm_register_thread_local(&stm_thread_local); - /* forever on the shadowstack: */ - _push_shared_roots(); - setup_thread(); objptr_t p = NULL; @@ -384,25 +364,28 @@ { int i; + struct node_s prebuilt_template = { + .sig = SIGNATURE, + .my_size = sizeof(struct node_s), + .my_id = -1, + .my_hash = -1, + .next = NULL + }; + stm_start_inevitable_transaction(&stm_thread_local); - for (i = 0; i < SHARED_ROOTS; i++) { - shared_roots[i] = stm_allocate(sizeof(struct node_s)); - ((nodeptr_t)shared_roots[i])->sig = SIGNATURE; - ((nodeptr_t)shared_roots[i])->my_size = sizeof(struct node_s); - ((nodeptr_t)shared_roots[i])->my_id = -1; - ((nodeptr_t)shared_roots[i])->my_hash = -1; - STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); + for (i = 0; i < PREBUILT_ROOTS; i++) { + void* new_templ = malloc(sizeof(struct node_s)); + memcpy(new_templ, &prebuilt_template, sizeof(struct node_s)); + prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)new_templ); + + if (i % 2 == 0) { + int hash = i; + stm_set_prebuilt_identityhash(prebuilt_roots[i], + hash); + ((nodeptr_t)prebuilt_roots[i])->my_hash = hash; + } } stm_commit_transaction(); - - /* make them OLD */ - - stm_start_inevitable_transaction(&stm_thread_local); - /* update now old references: */ - _pop_shared_roots(); - _push_shared_roots(); - stm_commit_transaction(); - /* leave them on this shadow stack forever for major collections */ } int main(void) @@ -442,7 +425,6 @@ printf("Test OK!\n"); - _pop_shared_roots(); stm_unregister_thread_local(&stm_thread_local); stm_teardown(); From noreply at buildbot.pypy.org Wed Mar 5 12:23:59 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Wed, 5 Mar 2014 12:23:59 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_range_iter_simple Message-ID: <20140305112359.02F1C1C31E9@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69716:10e1e307844f Date: 2014-03-05 11:54 +0100 http://bitbucket.org/pypy/pypy/changeset/10e1e307844f/ Log: fix test_range_iter_simple diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -176,14 +176,14 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" 
guard_not_invalidated? - i16 = int_ge(i11, i12) - guard_false(i16, descr=...) + i16 = int_lt(i11, i12) + guard_true(i16, descr=...) i20 = int_add(i11, 1) i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i25 = int_ge(i11, i9) - guard_false(i25, descr=...) + i25 = int_lt(i11, i9) + guard_true(i25, descr=...) i27 = int_add_ovf(i7, i11) guard_no_overflow(descr=...) --TICK-- From noreply at buildbot.pypy.org Wed Mar 5 12:24:00 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Wed, 5 Mar 2014 12:24:00 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_range_iter_normal Message-ID: <20140305112400.6E2EE1C31E9@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69717:925eec5518eb Date: 2014-03-05 12:22 +0100 http://bitbucket.org/pypy/pypy/changeset/925eec5518eb/ Log: fix test_range_iter_normal diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -214,10 +214,10 @@ i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i23 = int_lt(i18, 0) - guard_false(i23, descr=...) - i25 = int_ge(i18, i9) - guard_false(i25, descr=...) + i23 = int_ge(i18, 0) + guard_true(i23, descr=...) + i25 = int_lt(i18, i9) + guard_true(i25, descr=...) i27 = int_add_ovf(i7, i18) guard_no_overflow(descr=...) --TICK-- From noreply at buildbot.pypy.org Wed Mar 5 12:44:07 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Wed, 5 Mar 2014 12:44:07 +0100 (CET) Subject: [pypy-commit] pypy default: add some extra checks for negative indices on SimpleRangeListStrategy Message-ID: <20140305114407.9E7A41C0290@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69718:b25b440c713c Date: 2014-03-05 12:37 +0100 http://bitbucket.org/pypy/pypy/changeset/b25b440c713c/ Log: add some extra checks for negative indices on SimpleRangeListStrategy diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -109,6 +109,13 @@ assert not self.not_forced(r) assert r == [1, 2, 3, 4, 5, 6, 7] + r = range(4) + assert r[-1] == 3 + assert r[3] == 3 + assert r[-4] == 0 + raises(IndexError, r.__getitem__, -5) + raises(IndexError, r.__getitem__, 4) + def test_reduce(self): it = iter(range(10)) assert it.next() == 0 From noreply at buildbot.pypy.org Wed Mar 5 12:46:38 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Wed, 5 Mar 2014 12:46:38 +0100 (CET) Subject: [pypy-commit] pypy default: move checks to separate test method Message-ID: <20140305114638.288321C3152@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69719:838e4aa1dc2d Date: 2014-03-05 12:45 +0100 http://bitbucket.org/pypy/pypy/changeset/838e4aa1dc2d/ Log: move checks to separate test method diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -109,6 +109,7 @@ assert not self.not_forced(r) assert r == [1, 2, 3, 4, 5, 6, 7] + def test_getitem_simple(self): r = range(4) assert r[-1] == 3 assert r[3] == 3 From noreply at buildbot.pypy.org Wed Mar 5 12:51:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 12:51:27 +0100 (CET) Subject: [pypy-commit] 
pypy default: typos Message-ID: <20140305115127.D4D2D1C3152@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69720:45869be00209 Date: 2014-03-05 11:27 +0100 http://bitbucket.org/pypy/pypy/changeset/45869be00209/ Log: typos diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -11,7 +11,7 @@ `mailing list`_. This is simply for the reason that small possible projects tend to change very rapidly. -This list is mostly for having on overview on potential projects. This list is +This list is mostly for having an overview on potential projects. This list is by definition not exhaustive and we're pleased if people come up with their own improvement ideas. In any case, if you feel like working on some of those projects, or anything else in PyPy, pop up on IRC or write to us on the @@ -71,7 +71,7 @@ different ways to represent a unicode string, depending on whether the string fits into ASCII, has only two-byte characters or needs four-byte characters. -The actual details would be rather differen in PyPy, but we would like to have +The actual details would be rather different in PyPy, but we would like to have the same optimization implemented. Or maybe not. We can also play around with the idea of using a single @@ -142,7 +142,7 @@ * `hg` -Embedding PyPy +Embedding PyPy and improving CFFI ---------------------------------------- Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ @@ -154,6 +154,8 @@ a dynamic-link library with whatever C API we want. This gives us a one-size-fits-all generic way to make .so/.dll files from Python. +This would fit well in a "redesign CFFI" work. + .. _`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html Optimising cpyext (CPython C-API compatibility layer) From noreply at buildbot.pypy.org Wed Mar 5 12:51:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 12:51:42 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Create the branch stmgc-c7 as a merge of stmgc-c4 and default. Message-ID: <20140305115142.219911C3347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69721:9e5c4be5c4f2 Date: 2014-03-05 12:49 +0100 http://bitbucket.org/pypy/pypy/changeset/9e5c4be5c4f2/ Log: Create the branch stmgc-c7 as a merge of stmgc-c4 and default. diff too long, truncating to 2000 out of 102364 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." 
+else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,60 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded library. */ + +#ifdef __cplusplus +extern "C" { +#endif + +// call this first +void rpython_startup_code(void); + +// pypy_init_threads has to be called in case you want to use threads +void pypy_init_threads(void); + +/* Initialize the home directory of PyPy. It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. 
+ */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD + */ +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + +/* a similar function, but inside Python code it'll register + a magic argument c_argument as int, which will be passed as void* from C. + Useful for passing pointers to arbitrary structs that contain callbacks + to register */ +int pypy_execute_source_ptr(char *source, void* ptr); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ 
b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. + buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F +0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 -NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 
-M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK +DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov 
+L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No +WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5 +oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW +zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w== +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow +gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV -BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV -BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM -VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS -c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 -LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu -N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a -MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU -ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ -y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf -5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc -VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC -BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG -CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB -MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev -Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j -b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j -b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH -MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0 -MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT -R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD -VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz -LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz -XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB -fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W -fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju 
-SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI -Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm -UQ== ------END CERTIFICATE----- - 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC - i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk -ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF -eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl -bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u -ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 -rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 -9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ -ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk -owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G -Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk -9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ -MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 -AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk -ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k -by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw -cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV -VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B -ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN -AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 -euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY -1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 -RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz -8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV -v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg +Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU +qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S +jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB +xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz +m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip +rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo +sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U +pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD +VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v +Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg +MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu +Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t +b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o +dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ +YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA +h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd +nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg 
+IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw +oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU +k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp +J6/5 -----END CERTIFICATE----- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT -AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 -ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB -IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 -4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 -2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh -alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv -u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW -xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p -XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd -tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX -BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov -L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN -AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO -rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd -FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM -+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI -3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb -+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD +VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0 +IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h +bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by +AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa +gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U +j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O +n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q +fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4 +e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f +BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW +onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a +gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o +2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk +I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X 
+OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1 +jIGZ -----END CERTIFICATE----- - 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t 
+3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_audioop.py b/lib-python/2.7/test/test_audioop.py --- a/lib-python/2.7/test/test_audioop.py +++ b/lib-python/2.7/test/test_audioop.py @@ -1,6 +1,6 @@ import audioop import unittest -from test.test_support import run_unittest +from test.test_support import run_unittest, impl_detail endian = 'big' if audioop.getsample('\0\1', 2, 0) == 1 else 'little' @@ -93,21 +93,25 @@ wtd = len(d2)//3 self.assertEqual(len(audioop.lin2lin(d1, got, wtd)), len(d2)) + @impl_detail(pypy=False) def test_adpcm2lin(self): # Very cursory test self.assertEqual(audioop.adpcm2lin(b'\0\0', 1, None), (b'\0' * 4, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 2, None), (b'\0' * 8, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 4, None), (b'\0' * 16, (0,0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): # Very cursory test self.assertEqual(audioop.lin2adpcm('\0\0\0\0', 1, None), ('\0\0', (0,0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(data[0], 1), '\xd5\xc5\xf5') self.assertEqual(audioop.lin2alaw(data[1], 2), '\xd5\xd5\xd5') self.assertEqual(audioop.lin2alaw(data[2], 4), '\xd5\xd5\xd5') + @impl_detail(pypy=False) def test_alaw2lin(self): # Cursory d = audioop.lin2alaw(data[0], 1) @@ -123,11 +127,13 @@ self.assertEqual(audioop.alaw2lin(d, 4), b'\x00\x00\x08\x00\x00\x00\x08\x01\x00\x00\x10\x02') + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(data[0], 1), '\xff\xe7\xdb') self.assertEqual(audioop.lin2ulaw(data[1], 2), '\xff\xff\xff') self.assertEqual(audioop.lin2ulaw(data[2], 4), '\xff\xff\xff') + @impl_detail(pypy=False) def test_ulaw2lin(self): # Cursory d = audioop.lin2ulaw(data[0], 1) @@ -195,6 +201,7 @@ self.assertRaises(audioop.error, audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392) + @impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -219,6 +226,7 @@ self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abc' state = None diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - 
self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). 
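The test_mailbox.py hunks above all apply one recurring pattern of this merge: tests that relied on CPython's reference-counting GC to close a file object the moment it became unreachable now keep a reference and close it explicitly, because PyPy's GC does not finalize objects promptly. A minimal sketch of the two styles, assuming nothing beyond the standard library; the helper names and the throwaway temp file are illustrative, not taken from the patch:

import os
import tempfile

def fragile_read(path):
    # Relies on the garbage collector to close the file object.
    # Deterministic on CPython (refcounting), but on PyPy the
    # descriptor may stay open until some later collection.
    return open(path).read()

def portable_read(path):
    # The style the updated tests switch to: keep the object and
    # close it explicitly (a "with" block would do the same).
    f = open(path)
    try:
        return f.read()
    finally:
        f.close()

if __name__ == "__main__":
    fd, path = tempfile.mkstemp()
    os.write(fd, b"hello")
    os.close(fd)
    assert portable_read(path) == "hello"
    os.unlink(path)

Closing explicitly also means the test_support.unlink()/rmtree() cleanups used above cannot fail on Windows because of a descriptor that is still open.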
diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): @@ -129,9 +129,13 @@ fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) + return else: - self.fail("expected os.tmpfile() to raise OSError") - return + if test_support.check_impl_detail(pypy=False): + self.fail("expected os.tmpfile() to raise OSError") + # on PyPy, os.tmpfile() uses the tempfile module + # anyway, so works even if we cannot write in root. + fp.close() else: # open() worked, therefore, tmpfile() should work. Close our # dummy file and proceed with the test as normal. 
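Several of the stdlib test changes in this merge (test_audioop, test_memoryview and test_os above) guard CPython-only behaviour with the implementation-detail helpers from test.test_support instead of deleting the assertions outright. A short sketch of how those two helpers are typically used; the test class and its bodies are invented for illustration:

import unittest
from test import test_support

class ImplDetailExamples(unittest.TestCase):

    @test_support.impl_detail("relies on CPython-only behaviour", pypy=False)
    def test_cpython_only(self):
        # Skipped on PyPy, runs on other implementations.
        pass

    def test_shared(self):
        # check_impl_detail() lets a single test branch on the
        # implementation it happens to run under.
        if test_support.check_impl_detail(cpython=True):
            expected = "cpython path"    # illustrative value
        else:
            expected = "generic path"    # illustrative value
        self.assertIn("path", expected)

if __name__ == "__main__":
    unittest.main()

The decorator skips a whole test on implementations it does not apply to, while check_impl_detail() is the inline runtime check, as in the test_memoryview and test_os hunks above.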
diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) @@ -994,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. 
+ L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). - tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -244,22 +244,18 @@ if __debug__: self._note("%s.wait(): got it", self) else: - # Balancing act: We can't afford a pure busy loop, so we - # have to sleep; but if we sleep the whole timeout time, - # we'll be unresponsive. The scheme here sleeps very - # little at first, longer as time goes on, but never longer - # than 20 times per second (or the timeout time remaining). - endtime = _time() + timeout - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - gotit = waiter.acquire(0) - if gotit: - break - remaining = endtime - _time() - if remaining <= 0: - break - delay = min(delay * 2, remaining, .05) - _sleep(delay) + # PyPy patch: use _py3k_acquire() + if timeout > 0: + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. + waiter.acquire() + gotit = True + else: + gotit = waiter.acquire(False) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. 
lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -128,10 +128,10 @@ """ if hex is not None: - if (bytes is not None or bytes_le is not None or fields is not None - or int is not None): - raise TypeError('if the hex argument is given, bytes, bytes_le, fields,' - ' and int need to be None') + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: @@ -139,8 +139,8 @@ int = long(hex, 16) elif bytes_le is not None: if bytes is not None or fields is not None or int is not None: - raise TypeError('if the bytes_le argument is given, bytes, fields,' - ' and int need to be None') + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + @@ -150,15 +150,16 @@ struct.unpack('>Q', bytes[8:])[0]) elif bytes is not None: if fields is not None or int is not None: - raise TypeError('if the bytes argument is given, fields' - ' and int need to be None') + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') int = (struct.unpack('>Q', bytes[:8])[0] << 64 | struct.unpack('>Q', bytes[8:])[0]) elif fields is not None: if int is not None: - raise TypeError('if the fields argument is given, int needs to be None') + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. 
+ if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py'), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + 
thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return 
keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 
'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -322,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -363,9 +379,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): @@ -982,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == 
"SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1011,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1166,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1183,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1302,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return result + pass # fall through and return tcl object. 
elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -72,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -111,6 +112,14 @@ incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include'] linklibs = ['tk85', 'tcl85'] libdirs = ['/usr/local/lib', '/usr/X11R6/lib'] +elif sys.platform.startswith("freebsd"): + incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include'] + linklibs = ['tk86', 'tcl86'] + libdirs = ['/usr/local/lib'] +elif sys.platform == 'win32': + incdirs = [] + linklibs = ['tcl85', 'tk85'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,546 @@ +import __builtin__ as builtins +import math +import struct +from fractions import gcd +from ctypes import create_string_buffer + + +_buffer = buffer + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = start + size + return struct.unpack_from(fmt, _buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: + return 0x7f + elif size == 1: + return 0xff + elif signed and size == 2: + return 0x7fff + elif size == 2: + return 0xffff + elif signed and size == 4: + return 0x7fffffff + elif size == 4: + return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: + return 0 + elif size == 1: + return -0x80 From noreply at buildbot.pypy.org Wed Mar 5 12:51:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 12:51:43 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140305115143.824991C3347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69722:d39482624aee Date: 2014-03-05 12:50 +0100 http://bitbucket.org/pypy/pypy/changeset/d39482624aee/ Log: merge heads diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -39,7 +39,7 @@ return len(self._items) class Item(object): - def __init__(self, name, kind, subitems=None): + def 
__init__(self, name, kind, subitems=[]): self.name = name self.kind = kind self.subitems = subitems @@ -72,7 +72,7 @@ items = SearchableSet() for line in lines: kind, name = line.split(" : ", 1) - subitems = None + subitems = [] if kind == KINDS["TYPE"] and name in SPECIAL_NAMES and attr is None: subitems = find_numpy_items(python, modname, name) items.add(Item(name, kind, subitems)) @@ -93,7 +93,8 @@ l[i].append(lst[k * lgt + i]) return l -SPECIAL_NAMES = ["ndarray", "dtype", "generic", "flatiter", "ufunc"] +SPECIAL_NAMES = ["ndarray", "dtype", "generic", "flatiter", "ufunc", + "nditer"] def main(argv): cpy_items = find_numpy_items("/usr/bin/python") diff --git a/pypy/module/micronumpy/tool/numready/search.py b/pypy/module/micronumpy/tool/numready/search.py --- a/pypy/module/micronumpy/tool/numready/search.py +++ b/pypy/module/micronumpy/tool/numready/search.py @@ -30,4 +30,4 @@ print kind, ":", name if __name__ == "__main__": - main(sys.argv) \ No newline at end of file + main(sys.argv) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -176,14 +176,14 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" guard_not_invalidated? - i16 = int_ge(i11, i12) - guard_false(i16, descr=...) + i16 = int_lt(i11, i12) + guard_true(i16, descr=...) i20 = int_add(i11, 1) i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i25 = int_ge(i11, i9) - guard_false(i25, descr=...) + i25 = int_lt(i11, i9) + guard_true(i25, descr=...) i27 = int_add_ovf(i7, i11) guard_no_overflow(descr=...) --TICK-- @@ -214,10 +214,10 @@ i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i23 = int_lt(i18, 0) - guard_false(i23, descr=...) - i25 = int_ge(i18, i9) - guard_false(i25, descr=...) + i23 = int_ge(i18, 0) + guard_true(i23, descr=...) + i25 = int_lt(i18, i9) + guard_true(i25, descr=...) i27 = int_add_ovf(i7, i18) guard_no_overflow(descr=...) --TICK-- diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -109,6 +109,14 @@ assert not self.not_forced(r) assert r == [1, 2, 3, 4, 5, 6, 7] + def test_getitem_simple(self): + r = range(4) + assert r[-1] == 3 + assert r[3] == 3 + assert r[-4] == 0 + raises(IndexError, r.__getitem__, -5) + raises(IndexError, r.__getitem__, 4) + def test_reduce(self): it = iter(range(10)) assert it.next() == 0 diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -136,141 +136,92 @@ # - but rpython.rtyper.module.ll_os.py on Windows will replace these functions # with other wrappers that directly handle unicode strings. 
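The hunk below collapses the repeated "isinstance(path, str)" branches of these wrappers into a single _as_bytes() helper, while every wrapper keeps its own specialize.argtype(0) so the RPython annotator still specializes each call on the argument type. A rough plain-Python sketch of the resulting pattern; the no-op decorator is only a stand-in for rpython.rlib.objectmodel.specialize, added here to keep the sketch self-contained:

    import os

    def specialize_argtype(*_positions):
        # stand-in for @specialize.argtype(...); the real decorator tells
        # the RPython annotator to produce one version per argument type
        def decorator(func):
            return func
        return decorator

    @specialize_argtype(0)
    def _as_bytes(path):
        # accept either a plain byte string or any object with as_bytes(),
        # checked in exactly one place instead of in every os.* wrapper
        assert path is not None
        if isinstance(path, str):
            return path
        return path.as_bytes()

    @specialize_argtype(0)
    def stat(path):
        return os.stat(_as_bytes(path))

    @specialize_argtype(0, 1)
    def rename(path1, path2):
        return os.rename(_as_bytes(path1), _as_bytes(path2))

    assert stat('.') is not None   # a str works; so would an as_bytes() object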
@specialize.argtype(0) -def open(path, flags, mode): +def _as_bytes(path): assert path is not None if isinstance(path, str): - return os.open(path, flags, mode) + return path else: - return os.open(path.as_bytes(), flags, mode) + return path.as_bytes() + + at specialize.argtype(0) +def open(path, flags, mode): + return os.open(_as_bytes(path), flags, mode) @specialize.argtype(0) def stat(path): - if isinstance(path, str): - return os.stat(path) - else: - return os.stat(path.as_bytes()) + return os.stat(_as_bytes(path)) @specialize.argtype(0) def lstat(path): - if isinstance(path, str): - return os.lstat(path) - else: - return os.lstat(path.as_bytes()) + return os.lstat(_as_bytes(path)) @specialize.argtype(0) def statvfs(path): - if isinstance(path, str): - return os.statvfs(path) - else: - return os.statvfs(path.as_bytes()) + return os.statvfs(_as_bytes(path)) @specialize.argtype(0) def unlink(path): - if isinstance(path, str): - return os.unlink(path) - else: - return os.unlink(path.as_bytes()) + return os.unlink(_as_bytes(path)) @specialize.argtype(0, 1) def rename(path1, path2): - if isinstance(path1, str): - return os.rename(path1, path2) - else: - return os.rename(path1.as_bytes(), path2.as_bytes()) + return os.rename(_as_bytes(path1), _as_bytes(path2)) @specialize.argtype(0) def listdir(dirname): - if isinstance(dirname, str): - return os.listdir(dirname) - else: - return os.listdir(dirname.as_bytes()) + return os.listdir(_as_bytes(dirname)) @specialize.argtype(0) def access(path, mode): - if isinstance(path, str): - return os.access(path, mode) - else: - return os.access(path.as_bytes(), mode) + return os.access(_as_bytes(path), mode) @specialize.argtype(0) def chmod(path, mode): - if isinstance(path, str): - return os.chmod(path, mode) - else: - return os.chmod(path.as_bytes(), mode) + return os.chmod(_as_bytes(path), mode) @specialize.argtype(0, 1) def utime(path, times): - if isinstance(path, str): - return os.utime(path, times) - else: - return os.utime(path.as_bytes(), times) + return os.utime(_as_bytes(path), times) @specialize.argtype(0) def chdir(path): - if isinstance(path, str): - return os.chdir(path) - else: - return os.chdir(path.as_bytes()) + return os.chdir(_as_bytes(path)) @specialize.argtype(0) def mkdir(path, mode=0777): - if isinstance(path, str): - return os.mkdir(path, mode) - else: - return os.mkdir(path.as_bytes(), mode) + return os.mkdir(_as_bytes(path), mode) @specialize.argtype(0) def rmdir(path): - if isinstance(path, str): - return os.rmdir(path) - else: - return os.rmdir(path.as_bytes()) + return os.rmdir(_as_bytes(path)) @specialize.argtype(0) def mkfifo(path, mode): - if isinstance(path, str): - os.mkfifo(path, mode) - else: - os.mkfifo(path.as_bytes(), mode) + os.mkfifo(_as_bytes(path), mode) @specialize.argtype(0) def mknod(path, mode, device): - if isinstance(path, str): - os.mknod(path, mode, device) - else: - os.mknod(path.as_bytes(), mode, device) + os.mknod(_as_bytes(path), mode, device) @specialize.argtype(0, 1) def symlink(src, dest): - if isinstance(src, str): - os.symlink(src, dest) - else: - os.symlink(src.as_bytes(), dest.as_bytes()) + os.symlink(_as_bytes(src), _as_bytes(dest)) if os.name == 'nt': import nt + @specialize.argtype(0) def _getfullpathname(path): - if isinstance(path, str): - return nt._getfullpathname(path) - else: - return nt._getfullpathname(path.as_bytes()) + return nt._getfullpathname(_as_bytes(path)) @specialize.argtype(0, 1) def putenv(name, value): - if isinstance(name, str): - os.environ[name] = value - else: - 
os.environ[name.as_bytes()] = value.as_bytes() + os.environ[_as_bytes(name)] = _as_bytes(value) @specialize.argtype(0) def unsetenv(name): - if isinstance(name, str): - del os.environ[name] - else: - del os.environ[name.as_bytes()] + del os.environ[_as_bytes(name)] if os.name == 'nt': from rpython.rlib import rwin32 From noreply at buildbot.pypy.org Wed Mar 5 16:52:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 16:52:00 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Import from the separate repository 'stmgc' Message-ID: <20140305155200.74CA21D2696@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69725:ca204b537ea9 Date: 2014-03-05 13:52 +0100 http://bitbucket.org/pypy/pypy/changeset/ca204b537ea9/ Log: Import from the separate repository 'stmgc' diff too long, truncating to 2000 out of 12026 lines diff --git a/rpython/translator/stm/import_stmgc.py b/rpython/translator/stm/import_stmgc.py --- a/rpython/translator/stm/import_stmgc.py +++ b/rpython/translator/stm/import_stmgc.py @@ -15,14 +15,14 @@ yield line def main(stmgc_dir): - stmgc_dir = py.path.local(stmgc_dir).join('c4') + stmgc_dir = py.path.local(stmgc_dir).join('c7') popen = subprocess.Popen(['hg', 'id', '-i'], cwd=str(stmgc_dir), stdout=subprocess.PIPE) rev = popen.stdout.read().strip() popen.wait() # stmgc_dest = py.path.local(__file__).join('..', 'src_stm') - plist = stmgc_dir.visit(rec=lambda p: False) + plist = stmgc_dir.visit(rec=lambda p: p.basename == 'stm') for p in sorted(plist): if not (p.basename.endswith('.c') or p.basename.endswith('.h')): continue diff --git a/rpython/translator/stm/src_stm/atomic_ops.h b/rpython/translator/stm/src_stm/atomic_ops.h deleted file mode 100644 --- a/rpython/translator/stm/src_stm/atomic_ops.h +++ /dev/null @@ -1,121 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ -#ifndef _SRCSTM_ATOMIC_OPS_ -#define _SRCSTM_ATOMIC_OPS_ - -#include -#define IMPLIES(a, b) (!(a) || (b)) - -/* Ask the compiler to really reload the revision_t argument from memory. - That's all that this macro does; it does not imply any type of barrier. - Consider it as meaning: I want to read (or possibly write) a shared - value out of explicit synchronization now. */ -#define ACCESS_ONCE(x) (*(volatile revision_t *)&(x)) - -#define UNLIKELY(test) __builtin_expect(test, 0) - - -#if defined(__amd64__) || defined(__i386__) -# define smp_wmb() asm volatile ("":::"memory") -# define smp_spinloop() asm volatile ("pause":::"memory") -#elif defined(__powerpc__) -# define smp_wmb() asm volatile ("lwsync":::"memory") -# define smp_spinloop() asm volatile ("":::"memory") /* fill me? 
*/ -#else -# error "Define smp_wmb() for your architecture" -#endif - - -#ifdef __llvm__ -# define HAS_SYNC_BOOL_COMPARE_AND_SWAP -# define HAS_SYNC_FETCH_AND_ADD -#endif - -#ifdef __GNUC__ -# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) -# define HAS_SYNC_BOOL_COMPARE_AND_SWAP -# define HAS_SYNC_FETCH_AND_ADD -# endif -#endif - - -#ifdef HAS_SYNC_BOOL_COMPARE_AND_SWAP -# define bool_cas __sync_bool_compare_and_swap -#else -/* x86 (32 bits and 64 bits) */ -static inline _Bool -bool_cas(revision_t *ptr, revision_t old, revision_t _new) -{ - revision_t prev; -#if defined(__amd64__) - assert(sizeof(revision_t) == 8); -#elif defined(__i386__) - assert(sizeof(revision_t) == 4); -#else -# error "the custom version of bool_cas() is only for x86 or x86-64" -#endif - asm volatile("lock;" -#if defined(__amd64__) - "cmpxchgq %1, %2;" -#else - "cmpxchgl %1, %2;" -#endif - : "=a"(prev) - : "q"(_new), "m"(*ptr), "a"(old) - : "memory"); - return prev == old; -} -/* end */ -#endif - -#ifdef HAS_SYNC_FETCH_AND_ADD -# define fetch_and_add __sync_fetch_and_add -#else -/* x86 (32 bits and 64 bits) */ -static inline revision_t -fetch_and_add(revision_t *ptr, revision_t value) -{ - revision_t prev; -#if defined(__amd64__) - assert(sizeof(revision_t) == 8); -#elif defined(__i386__) - assert(sizeof(revision_t) == 4); -#else -# error "the custom version of fetch_and_add() is only for x86 or x86-64" -#endif - asm volatile("lock;" -#if defined(__amd64__) - "xaddq %1, %2;" -#else - "xaddl %1, %2;" -#endif - : "=r"(prev) - : "0"(value), "m"(*ptr) - : "memory"); - return prev; -} -/* end */ -#endif - - -#if 0 /* fprinting versions */ -# define spinlock_acquire(lock, targetvalue) \ - do { if (bool_cas(&(lock), 0, (targetvalue))) { \ - dprintf(("<<< locked %d\n", (int)targetvalue)); \ - break; \ - } \ - do { smp_spinloop(); } while (ACCESS_ONCE(lock)); \ - } while (1) -# define spinlock_release(lock) \ - do { dprintf(("unlocked >>>\n")); smp_wmb(); \ - assert((lock) != 0); (lock) = 0; } while (0) -#else -# define spinlock_acquire(lock, targetvalue) \ - do { if (bool_cas(&(lock), 0, (targetvalue))) break; \ - do { smp_spinloop(); } while (ACCESS_ONCE(lock)); \ - } while (1) -# define spinlock_release(lock) \ - do { smp_wmb(); assert((lock) != 0); (lock) = 0; } while (0) -#endif - - -#endif /* _SRCSTM_ATOMIC_OPS_ */ diff --git a/rpython/translator/stm/src_stm/dbgmem.c b/rpython/translator/stm/src_stm/dbgmem.c deleted file mode 100644 --- a/rpython/translator/stm/src_stm/dbgmem.c +++ /dev/null @@ -1,178 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ -#include "stmimpl.h" -#include - - -#define PAGE_SIZE 4096 -#define MEM_SIZE(mem) (*(((size_t *)(mem)) - 1)) - -#ifdef _GC_DEBUG -/************************************************************/ - -#define MMAP_TOTAL 2000*1024*1024 /* 2000MB */ - -static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; -static char *zone_start, *zone_current = NULL, *zone_end = NULL; -static signed char accessible_pages[MMAP_TOTAL / PAGE_SIZE] = {0}; - -int stm_use_mprotect = 1; - -static void _stm_dbgmem(void *p, size_t sz, int prot) -{ - if (sz == 0) - return; - - assert((ssize_t)sz > 0); - intptr_t align = ((intptr_t)p) & (PAGE_SIZE-1); - p = ((char *)p) - align; - sz += align; - if (stm_use_mprotect) { - dprintf(("dbgmem: %p, %ld, %d\n", p, (long)sz, prot)); - int err = mprotect(p, sz, prot); - assert(err == 0); - } -} - -void *stm_malloc(size_t sz) -{ - size_t real_sz = sz + sizeof(size_t); - -#ifdef _GC_MEMPROTECT - pthread_mutex_lock(&malloc_mutex); 
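The spinlock_acquire/spinlock_release macros in atomic_ops.h just above are built on a compare-and-swap primitive plus a CPU pause hint. A small self-contained C sketch of that acquire/release pattern, written with the GCC __sync builtins the header already mentions; it illustrates the idea only and is not a drop-in replacement (the real macros spin on a plain read and issue a write barrier before the releasing store):

    /* CAS-based spinlock sketch; revision_t is simply 'long' here. */
    #include <assert.h>

    typedef long revision_t;

    static void spinlock_acquire(revision_t *lock, revision_t tag)
    {
        /* loop until we atomically change the lock word from 0 to 'tag' */
        while (!__sync_bool_compare_and_swap(lock, 0, tag))
            ;   /* spinning; the real code also executes a 'pause' hint */
    }

    static void spinlock_release(revision_t *lock)
    {
        assert(*lock != 0);
        __sync_lock_release(lock);   /* store 0 with release ordering */
    }

    int main(void)
    {
        revision_t lock = 0;
        spinlock_acquire(&lock, 'L');   /* the stm code uses one-letter tags */
        spinlock_release(&lock);
        return 0;
    }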
- if (zone_current == NULL) { - zone_start = mmap(NULL, MMAP_TOTAL, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (zone_start == NULL || zone_start == MAP_FAILED) { - stm_fatalerror("not enough memory: mmap() failed\n"); - } - zone_current = zone_start; - zone_end = zone_start + MMAP_TOTAL; - assert((MMAP_TOTAL % PAGE_SIZE) == 0); - - _stm_dbgmem(zone_start, MMAP_TOTAL, PROT_NONE); - } - - size_t nb_pages = (real_sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; - char *result = zone_current; - zone_current += nb_pages * PAGE_SIZE; - if (zone_current > zone_end) { - stm_fatalerror("dbgmem.c: %ld MB of memory have been exausted\n", - (long)(MMAP_TOTAL / (1024*1024))); - } - pthread_mutex_unlock(&malloc_mutex); - - result += (-real_sz) & (PAGE_SIZE-1); - assert(((intptr_t)(result + real_sz) & (PAGE_SIZE-1)) == 0); - _stm_dbgmem(result, real_sz, PROT_READ | PROT_WRITE); - - long i, base = (result - zone_start) / PAGE_SIZE; - for (i = 0; i < nb_pages; i++) - accessible_pages[base + i] = 42; - - assert(((intptr_t)(result + real_sz) & (PAGE_SIZE-1)) == 0); -#else - char * result = malloc(real_sz); -#endif - - dprintf(("stm_malloc(%zu): %p\n", sz, result)); - memset(result, 0xBB, real_sz); - - result += sizeof(size_t); - MEM_SIZE(result) = real_sz; - return result; -} - -void stm_free(void *p) -{ - if (p == NULL) { - return; - } - size_t real_sz = MEM_SIZE(p); - void *real_p = p - sizeof(size_t); - assert(real_sz > 0); - - memset(real_p, 0xDD, real_sz); -#ifdef _GC_MEMPROTECT - assert(((intptr_t)((char *)real_p + real_sz) & (PAGE_SIZE-1)) == 0); - - size_t nb_pages = (real_sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; - long i, base = ((char *)real_p - zone_start) / PAGE_SIZE; - assert(0 <= base && base < (MMAP_TOTAL / PAGE_SIZE)); - for (i = 0; i < nb_pages; i++) { - assert(accessible_pages[base + i] == 42); - accessible_pages[base + i] = -1; - } - - _stm_dbgmem(real_p, real_sz, PROT_NONE); -#endif -} - -void *stm_realloc(void *p, size_t newsz, size_t oldsz) -{ - void *r = stm_malloc(newsz); - memcpy(r, p, oldsz < newsz ? oldsz : newsz); - stm_free(p); - return r; -} - -int _stm_can_access_memory(char *p) -{ -#ifndef _GC_MEMPROTECT - assert(0); /* tests must use MEMPROTECT */ -#endif - char* real_p = p - sizeof(size_t); - long base = ((char *)real_p - zone_start) / PAGE_SIZE; - assert(0 <= base && base < (MMAP_TOTAL / PAGE_SIZE)); - return accessible_pages[base] == 42; -} - -void assert_cleared(char *p, size_t size) -{ - size_t i; - for (i = 0; i < size; i++) - assert(p[i] == 0); -} - -/************************************************************/ -#endif - - -void stm_clear_large_memory_chunk(void *base, size_t size, - size_t already_cleared) -{ - char *baseaddr = base; - assert(already_cleared <= size); - -#if !defined(_USE_VALGRIND) - if (size > 2 * PAGE_SIZE) { - int lowbits = ((intptr_t)baseaddr) & (PAGE_SIZE-1); - if (lowbits) { /* clear the initial misaligned part, if any */ - int partpage = PAGE_SIZE - lowbits; - memset(baseaddr, 0, partpage); - baseaddr += partpage; - size -= partpage; - } - /* 'already_cleared' bytes at the end are assumed to be already - cleared. Reduce 'size' accordingly, but avoid getting a - misaligned 'size'. 
*/ - size_t length = size & (-PAGE_SIZE); - if (already_cleared > (size - length)) { - already_cleared -= (size - length); - already_cleared &= -PAGE_SIZE; - length -= already_cleared; - size = length; - already_cleared = 0; - } - - int err = madvise(baseaddr, length, MADV_DONTNEED); - if (err == 0) { /* madvise() worked */ - baseaddr += length; - size -= length; - } - } -#endif - if (size > already_cleared) { /* clear the final misaligned part, if any */ - memset(baseaddr, 0, size - already_cleared); - } - assert_cleared(base, size); -} diff --git a/rpython/translator/stm/src_stm/dbgmem.h b/rpython/translator/stm/src_stm/dbgmem.h deleted file mode 100644 --- a/rpython/translator/stm/src_stm/dbgmem.h +++ /dev/null @@ -1,26 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ -#ifndef _SRCSTM_DBGMEM_H -#define _SRCSTM_DBGMEM_H - - -#ifdef _GC_DEBUG - -void *stm_malloc(size_t); -void stm_free(void *); -void *stm_realloc(void *, size_t, size_t); -int _stm_can_access_memory(char *); -void assert_cleared(char *, size_t); - -#else - -#define stm_malloc(sz) malloc(sz) -#define stm_free(p) free(p) -#define stm_realloc(p,newsz,oldsz) realloc(p,newsz) -#define assert_cleared(p,sz) do { } while(0) - -#endif - -void stm_clear_large_memory_chunk(void *, size_t, size_t); - - -#endif diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c deleted file mode 100644 --- a/rpython/translator/stm/src_stm/et.c +++ /dev/null @@ -1,1887 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ -/* -*- c-basic-offset: 2 -*- */ - -/* XXX assumes that time never wraps around (in a 'long'), which may be - * correct on 64-bit machines but not on 32-bit machines if the process - * runs for long enough. - */ -#include "stmimpl.h" - -char* stm_dbg_get_hdr_str(gcptr obj) -{ - static char tmp_buf[128]; - char *cur; - char *flags[] = GC_FLAG_NAMES; - int i; - - i = 0; - cur = tmp_buf; - cur += sprintf(cur, "%p : ", obj); - while (flags[i]) { - if (obj->h_tid & (STM_FIRST_GCFLAG << i)) { - cur += sprintf(cur, "%s|", flags[i]); - } - i++; - } - cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); - cur += sprintf(cur, " : rev=0x%lx : orig=0x%lx", - (long)obj->h_revision, (long)obj->h_original); - return tmp_buf; -} - -void stm_dump_dbg(void) -{ - fprintf(stderr, "/**** stm_dump_dbg ****/\n\n"); - - int i; - for (i = 0; i < MAX_THREADS; i++) { - struct tx_public_descriptor *pd = stm_descriptor_array[i]; - if (pd == NULL) - continue; - fprintf(stderr, "stm_descriptor_array[%d]\n((struct tx_public_descriptor *)%p)\n", - i, pd); - - struct tx_descriptor *d = stm_tx_head; - while (d && d->public_descriptor != pd) - d = d->tx_next; - if (!d) { - fprintf(stderr, "\n"); - continue; - } - - fprintf(stderr, "((struct tx_descriptor *)\033[%dm%p\033[0m)\n" - "pthread_self = 0x%lx\n\n", d->tcolor, d, (long)d->pthreadid); - } - - fprintf(stderr, "/**********************/\n"); -} - - -__thread int stm_active; -__thread struct tx_descriptor *thread_descriptor = NULL; - -/* 'global_cur_time' is normally a multiple of 2, except when we turn - a transaction inevitable: we then add 1 to it. */ -static revision_t global_cur_time = 2; - -/* a negative odd number that identifies the currently running - transaction within the thread. 
*/ -__thread revision_t stm_private_rev_num; - - -revision_t stm_global_cur_time(void) /* for tests */ -{ - return global_cur_time; -} -revision_t get_private_rev_num(void) /* for tests */ -{ - return stm_private_rev_num; -} -struct tx_descriptor *stm_thread_descriptor(void) /* for tests */ -{ - return thread_descriptor; -} -static int is_private(gcptr P) -{ - return (P->h_revision == stm_private_rev_num) || - (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); -} -int _stm_is_private(gcptr P) -{ - return is_private(P); -} -void stm_clear_read_cache(void) -{ - fxcache_clear(&thread_descriptor->recent_reads_cache); -} - -/************************************************************/ - -static void ValidateNow(struct tx_descriptor *); -static void CancelLocks(struct tx_descriptor *d); - -static _Bool is_inevitable(struct tx_descriptor *d) -{ - /* Assert that we are running a transaction. - * Returns True if this transaction is inevitable. */ - assert(*d->active_ref == 1 + !d->setjmp_buf); - return *d->active_ref == 2; -} - -static pthread_mutex_t mutex_inevitable = PTHREAD_MUTEX_INITIALIZER; - -static void inev_mutex_release(void) -{ - pthread_mutex_unlock(&mutex_inevitable); -} - -static void inev_mutex_acquire(struct tx_descriptor *d) -{ /* must save roots around this call */ - stm_stop_sharedlock(); - pthread_mutex_lock(&mutex_inevitable); - stm_start_sharedlock(); - - if (*d->active_ref < 0) - { - inev_mutex_release(); - AbortNowIfDelayed(); - abort(); /* unreachable */ - } -} - -/************************************************************/ - -gcptr stm_DirectReadBarrier(gcptr G) -{ - struct tx_descriptor *d = thread_descriptor; - gcptr P = G; - revision_t v; - - d->count_reads++; - assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(d, P))); - assert(G->h_revision != 0); - - restart_all: - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - { - assert(IS_POINTER(P->h_revision)); /* pointer to the backup copy */ - - /* check P->h_revision->h_revision: if a pointer, then it means - the backup copy has been stolen into a public object and then - modified by some other thread. Abort. */ - if (IS_POINTER(((gcptr)P->h_revision)->h_revision)) - AbortTransaction(ABRT_STOLEN_MODIFIED); - - goto add_in_recent_reads_cache; - } - /* else, for the rest of this function, we can assume that P was not - a private copy */ - - if (P->h_tid & GCFLAG_PUBLIC) - { - /* follow the chained list of h_revision's as long as they are - regular pointers. We will only find more public objects - along this chain. - */ - restart_all_public: - assert(P->h_tid & GCFLAG_PUBLIC); - v = ACCESS_ONCE(P->h_revision); - if (IS_POINTER(v)) /* "is a pointer", "has a more recent revision" */ - { - retry: - if (v & 2) - goto follow_stub; - - gcptr P_prev = P; - P = (gcptr)v; - assert((P->h_tid & GCFLAG_PUBLIC) || - (P_prev->h_tid & GCFLAG_MOVED)); - - v = ACCESS_ONCE(P->h_revision); - - if (IS_POINTER(v)) - { - if (v & 2) - goto follow_stub; - - /* we update P_prev->h_revision as a shortcut - P_prev->P->v => P_prev->v */ - /* XXX check if this really gives a worse performance than only - doing this write occasionally based on a counter in d */ - P_prev->h_revision = v; - P = (gcptr)v; - v = ACCESS_ONCE(P->h_revision); - if (IS_POINTER(v)) - goto retry; - } - - /* We reach this point if P != G only. 
Check again the - read_barrier_cache: if P now hits the cache, just return it - */ - if (FXCACHE_AT(P) == P) - { - dprintf(("read_barrier: %p -> %p fxcache\n", G, P)); - return P; - } - } - - /* If we land on a P with GCFLAG_PUBLIC_TO_PRIVATE, it might be - because *we* have an entry in d->public_to_private. (It might - also be someone else.) - */ - if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) - { - wlog_t *item; - retry_public_to_private:; - G2L_FIND(d->public_to_private, P, item, goto no_private_obj); - - /* We have a key in 'public_to_private'. The value is the - corresponding private object. */ - P = item->val; - assert(!(P->h_tid & GCFLAG_PUBLIC)); - assert(is_private(P)); - dprintf(("read_barrier: %p -> %p public_to_private\n", G, P)); - return P; - - no_private_obj: - /* Key not found. It might be because there really is none, or - because we still have it waiting in 'stolen_objects'. */ - if (d->public_descriptor->stolen_objects.size > 0) - { - spinlock_acquire(d->public_descriptor->collection_lock, 'N'); - stm_normalize_stolen_objects(d); - spinlock_release(d->public_descriptor->collection_lock); - goto retry_public_to_private; - } - } - - /* The answer is a public object. Is it too recent? */ - if (UNLIKELY(v > d->start_time)) - { - if (v >= LOCKED) - { - SpinLoop(SPLP_LOCKED_INFLIGHT); - goto restart_all_public; // spinloop until it is no longer LOCKED - } - ValidateNow(d); // try to move start_time forward - goto restart_all_public; // restart searching from P - } - dprintf(("read_barrier: %p -> %p public\n", G, P)); - } - else - { - /* Not private and not public: it's a protected object - */ - dprintf(("read_barrier: %p -> %p protected\n", G, P)); - - /* The risks are not high, but in parallel it's possible for the - object to be stolen by another thread and become public, after - which it can be outdated by another commit. So the following - assert can actually fail in that case. */ - /*assert(P->h_revision & 1);*/ - } - - dprintf(("readobj: %p\n", P)); - assert(!(P->h_tid & GCFLAG_STUB)); - gcptrlist_insert(&d->list_of_read_objects, P); - - add_in_recent_reads_cache: - /* The risks are that the following assert fails, because the flag was - added just now by a parallel thread during stealing... */ - /*assert(!(P->h_tid & GCFLAG_MOVED));*/ - fxcache_add(&d->recent_reads_cache, P); - return P; - - follow_stub:; - /* We know that P is a stub object, because only stubs can have - an h_revision that is == 2 mod 4. - */ - struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); - if (foreign_pd == d->public_descriptor) - { - /* Same thread: dereference the pointer directly. It's possible - we reach any kind of object, even a public object, in case it - was stolen. So we just repeat the whole procedure. */ - P = (gcptr)(v - 2); - dprintf(("read_barrier: %p -> %p via stub\n ", G, P)); - - if (UNLIKELY((P->h_revision != stm_private_rev_num) && - (FXCACHE_AT(P) != P))) - goto restart_all; - - return P; - } - else - { - /* stealing */ - dprintf(("read_barrier: %p -> stealing %p...\n ", G, P)); - stm_steal_stub(P); - - assert(P->h_tid & GCFLAG_PUBLIC); - goto restart_all_public; - } -} - -gcptr stm_RepeatReadBarrier(gcptr P) -{ - /* Version of stm_DirectReadBarrier() that doesn't abort and assumes - * that 'P' was already an up-to-date result of a previous - * stm_DirectReadBarrier(). We only have to check if we did in the - * meantime a stm_write_barrier(). Should only be called if we - * have the flag PUBLIC_TO_PRIVATE or on MOVED objects. 
This version - * should never abort (it is used in stm_decode_abort_info()). - */ - assert(P->h_tid & GCFLAG_PUBLIC); - assert(!(P->h_tid & GCFLAG_STUB)); - assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), - stmgc_is_in_nursery(thread_descriptor, P))); - - - if (P->h_tid & GCFLAG_MOVED) - { - dprintf(("repeat_read_barrier: %p -> %p moved\n", P, - (gcptr)P->h_revision)); - P = (gcptr)P->h_revision; - assert(P->h_tid & GCFLAG_PUBLIC); - assert(!(P->h_tid & GCFLAG_STUB)); - assert(!(P->h_tid & GCFLAG_MOVED)); - if (!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) - return P; - } - assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - - struct tx_descriptor *d = thread_descriptor; - wlog_t *item; - G2L_FIND(d->public_to_private, P, item, goto no_private_obj); - - /* We have a key in 'public_to_private'. The value is the - corresponding private object. */ - dprintf(("repeat_read_barrier: %p -> %p public_to_private\n", P, item->val)); - P = item->val; - assert(!(P->h_tid & GCFLAG_PUBLIC)); - assert(!(P->h_tid & GCFLAG_STUB)); - assert(is_private(P)); - return P; - - no_private_obj: - /* Key not found. It should not be waiting in 'stolen_objects', - because this case from steal.c applies to objects to were originally - backup objects. 'P' cannot be a backup object if it was obtained - earlier as a result of stm_read_barrier(). - */ - return P; -} - -gcptr stm_ImmutReadBarrier(gcptr P) -{ - assert(P->h_tid & GCFLAG_STUB); - assert(P->h_tid & GCFLAG_PUBLIC); - assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), - stmgc_is_in_nursery(thread_descriptor, P))); - assert(P->h_revision != 0); - - revision_t v = ACCESS_ONCE(P->h_revision); - assert(IS_POINTER(v)); /* "is a pointer", "has a more recent revision" */ - - if (!(v & 2)) - { - P = (gcptr)v; - } - else - { - /* follow a stub reference */ - struct tx_descriptor *d = thread_descriptor; - struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); - if (foreign_pd == d->public_descriptor) - { - /* Same thread: dereference the pointer directly. 
*/ - dprintf(("immut_read_barrier: %p -> %p via stub\n ", P, - (gcptr)(v - 2))); - P = (gcptr)(v - 2); - } - else - { - /* stealing: needed because accessing v - 2 from this thread - is forbidden (the target might disappear under our feet) */ - dprintf(("immut_read_barrier: %p -> stealing...\n ", P)); - stm_steal_stub(P); - } - } - return stm_immut_read_barrier(P); /* retry */ -} - -static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj, - int from_stolen) -{ - gcptr org_pubobj = pubobj; - while ((pubobj->h_revision & 3) == 0) - { - assert(pubobj != P); - pubobj = (gcptr)pubobj->h_revision; - } - if (pubobj == P || ((P->h_revision & 3) == 2 && - pubobj->h_revision == P->h_revision)) - { - assert(!(org_pubobj->h_tid & GCFLAG_STUB)); - assert(!(privobj->h_tid & GCFLAG_PUBLIC)); - assert(is_private(privobj)); - if (P != org_pubobj) - dprintf(("| actually %p ", org_pubobj)); - if (from_stolen) - dprintf(("-stolen")); - else - assert(org_pubobj->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - dprintf(("-public_to_private-> %p private\n", privobj)); - return privobj; - } - return NULL; -} - -static gcptr _find_public_to_private(gcptr P) -{ - gcptr R; - wlog_t *item; - struct tx_descriptor *d = thread_descriptor; - - G2L_LOOP_FORWARD(d->public_to_private, item) - { - assert(item->addr->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - R = _match_public_to_private(P, item->addr, item->val, 0); - if (R != NULL) - return R; - - } G2L_LOOP_END; - - long i, size = d->public_descriptor->stolen_objects.size; - gcptr *items = d->public_descriptor->stolen_objects.items; - - for (i = 0; i < size; i += 2) - { - if (items[i + 1] == NULL) - continue; - R = _match_public_to_private(P, items[i], items[i + 1], 1); - if (R != NULL) - return R; - } - - return NULL; -} - -static void _check_flags(gcptr P) -{ -#ifndef NDEBUG - struct tx_descriptor *d = thread_descriptor; - if (P->h_tid & GCFLAG_STUB) - { - dprintf(("S")); - } - int is_old = (P->h_tid & GCFLAG_OLD) != 0; - int in_nurs = (d->nursery_base <= (char*)P && ((char*)P) < d->nursery_end); - if (in_nurs) - { - assert(!is_old); - dprintf(("Y ")); - } - else - { - assert(is_old); - dprintf(("O ")); - } -#endif -} - -gcptr _stm_nonrecord_barrier(gcptr P) -{ - /* follows the logic in stm_DirectReadBarrier() */ - struct tx_descriptor *d = thread_descriptor; - revision_t v; - - dprintf(("_stm_nonrecord_barrier: %p ", P)); - _check_flags(P); - - restart_all: - if (P->h_revision == stm_private_rev_num) - { - /* private */ - dprintf(("private\n")); - return P; - } - - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - { - /* private too, with a backup copy */ - assert(IS_POINTER(P->h_revision)); - dprintf(("private_from_protected\n")); - return P; - } - - if (P->h_tid & GCFLAG_PUBLIC) - { - dprintf(("public ")); - - while (v = P->h_revision, IS_POINTER(v)) - { - if (P->h_tid & GCFLAG_MOVED) - dprintf(("nursery_moved ")); - - if (v & 2) - { - dprintf(("stub ")); - gcptr L = _find_public_to_private(P); - if (L != NULL) - return L; - goto follow_stub; - } - - P = (gcptr)v; - assert(P->h_tid & GCFLAG_PUBLIC); - dprintf(("-> %p public ", P)); - _check_flags(P); - } - - gcptr L = _find_public_to_private(P); - if (L != NULL) - return L; - - if (UNLIKELY(v > d->start_time)) - { - dprintf(("too recent!\n")); - return NULL; // object too recent - } - dprintf(("\n")); - } - else - { - dprintf(("protected\n")); - } - return P; - - follow_stub:; - if (STUB_THREAD(P) == d->public_descriptor) - { - P = (gcptr)(v - 2); - dprintf(("-> %p ", P)); - _check_flags(P); - } - else - { - P = 
(gcptr)(v - 2); - /* cannot _check_flags(P): foreign! */ - dprintf(("-foreign-> %p ", P)); - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - { - P = (gcptr)P->h_revision; /* the backup copy */ - /* cannot _check_flags(P): foreign! */ - dprintf(("-backup-> %p ", P)); - } - if (!(P->h_tid & GCFLAG_PUBLIC)) - { - dprintf(("protected by someone else!\n")); - return (gcptr)-1; - } - } - /* cannot _check_flags(P): foreign! */ - goto restart_all; -} - -static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) -{ - gcptr B; - - assert(P->h_revision != stm_private_rev_num); - assert(P->h_revision & 1); - assert(!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(P->h_tid & GCFLAG_STUB)); - assert(!(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - - B = stmgc_duplicate_old(P); - B->h_tid |= GCFLAG_BACKUP_COPY; - B->h_tid &= ~GCFLAG_HAS_ID; - - if (!(P->h_original) && (P->h_tid & GCFLAG_OLD)) { - /* if P is old, it must be the original - if P is young, it will create a shadow original later - or it's getting decided when backup gets stolen. - */ - B->h_original = (revision_t)P; - } - - P->h_tid |= GCFLAG_PRIVATE_FROM_PROTECTED; - P->h_revision = (revision_t)B; - - gcptrlist_insert(&d->private_from_protected, P); - dprintf(("private_from_protected: insert %p (backup %p)\n", P, B)); - - return P; /* always returns its arg: the object is converted in-place */ -} - -static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) -{ - assert(R->h_tid & GCFLAG_PUBLIC); - assert(!(R->h_tid & GCFLAG_MOVED)); - -#ifdef _GC_DEBUG - wlog_t *entry; - G2L_FIND(d->public_to_private, R, entry, goto not_found); - stm_fatalerror("R is already in public_to_private\n"); - not_found: -#endif - - assert(!(R->h_tid & GCFLAG_STUB)); - R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; - - /* note that stmgc_duplicate() usually returns a young object, but may - return an old one if the nursery is full at this moment. */ - gcptr L = stmgc_duplicate(R); - if (!(L->h_original) || L->h_tid & GCFLAG_PREBUILT_ORIGINAL) { - /* if we don't have an original object yet, it must be the - old public R - Also, prebuilt objects may have a predefined hash stored - in the h_original. -> point to the original copy on copies - of the prebuilt. - */ - assert(R->h_tid & GCFLAG_OLD); // if not, force stm_id?? 
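LocalizePublic, whose body continues just below, is the copy-on-write half of the write barrier: the first write to a public object R makes a transaction-private duplicate L, flags R as PUBLIC_TO_PRIVATE, and records R -> L in the per-transaction public_to_private table so that later barriers in the same transaction return L. A deliberately simplified C sketch of that bookkeeping; obj_t, tx_t and localize_public are ad-hoc names invented for the illustration, not the stmgc structures:

    /* Copy-on-write sketch: the first write to a shared object gives this
       transaction its own private copy and remembers the mapping. */
    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { int public_to_private;  /* flag on the shared object */
                     size_t size;            /* total object size */
                     long payload; } obj_t;

    typedef struct { obj_t *public_obj; obj_t *private_copy; } mapping_t;
    typedef struct { mapping_t map[64]; int n; } tx_t;

    static obj_t *localize_public(tx_t *tx, obj_t *r)
    {
        int i;
        for (i = 0; i < tx->n; i++)             /* already localized? */
            if (tx->map[i].public_obj == r)
                return tx->map[i].private_copy;

        assert(tx->n < 64);
        obj_t *l = malloc(r->size);             /* duplicate the object */
        memcpy(l, r, r->size);
        r->public_to_private = 1;               /* readers must consult the table */
        tx->map[tx->n].public_obj = r;          /* record R -> L */
        tx->map[tx->n].private_copy = l;
        tx->n++;
        return l;                               /* further writes go to L */
    }

    int main(void)
    {
        tx_t tx = { .n = 0 };
        obj_t r = { 0, sizeof(obj_t), 42 };
        obj_t *l = localize_public(&tx, &r);
        l->payload = 43;                        /* private write; r is untouched */
        assert(r.payload == 42 && r.public_to_private == 1);
        free(l);
        return 0;
    }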
- L->h_original = (revision_t)R; - } - - assert(!(L->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(L->h_tid & GCFLAG_STUB)); - assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - L->h_tid &= ~(GCFLAG_VISITED | - GCFLAG_MARKED | - GCFLAG_PUBLIC | - GCFLAG_PREBUILT_ORIGINAL | - GCFLAG_PUBLIC_TO_PRIVATE | - GCFLAG_WRITE_BARRIER | - 0); - L->h_revision = stm_private_rev_num; - assert(stm_private_rev_num < 0); - assert(stm_private_rev_num & 1); - g2l_insert(&d->public_to_private, R, L); - dprintf(("write_barrier: adding %p -> %p to public_to_private\n", - R, L)); - - /* must remove R from the read_barrier_cache, because returning R is no - longer a valid result */ - fxcache_remove(&d->recent_reads_cache, R); - - return L; -} - -static inline void record_write_barrier(gcptr P) -{ - assert(is_private(P)); - assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), - stmgc_is_in_nursery(thread_descriptor, P))); - if (P->h_tid & GCFLAG_WRITE_BARRIER) - { - assert(P->h_tid & GCFLAG_OLD); - P->h_tid &= ~GCFLAG_WRITE_BARRIER; - gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P); - } -} - -gcptr stm_RepeatWriteBarrier(gcptr P) -{ - assert(P->h_revision != 0); - assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), - stmgc_is_in_nursery(thread_descriptor, P))); - - assert(!(P->h_tid & GCFLAG_IMMUTABLE)); - assert(is_private(P)); - assert(P->h_tid & GCFLAG_WRITE_BARRIER); - P->h_tid &= ~GCFLAG_WRITE_BARRIER; - gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P); - return P; -} - -gcptr stm_WriteBarrier(gcptr P) -{ - assert(P->h_revision != 0); - assert(!(P->h_tid & GCFLAG_IMMUTABLE)); - assert((P->h_tid & GCFLAG_STUB) || - stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); - /* If stmgc_size(P) gives a number <= sizeof(stub)-WORD, then there is a - risk of overrunning the object later in gcpage.c when copying a stub - over it. However such objects are so small that they contain no field - at all, and so no write barrier should occur on them. */ - - assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), - stmgc_is_in_nursery(thread_descriptor, P))); - - if (is_private(P)) - { - /* If we have GCFLAG_WRITE_BARRIER in P, then list it into - old_objects_to_trace: it's a private object that may be - modified by the program after we return, and the mutation may - be to write young pointers (in fact it's a common case). - */ - record_write_barrier(P); - return P; - } - - gcptr R, W; - R = stm_read_barrier(P); - - if (is_private(R)) - { - record_write_barrier(R); - return R; - } - - struct tx_descriptor *d = thread_descriptor; - assert(stm_active >= 1); - - /* We need the collection_lock for the sequel; this is required notably - because we're about to edit flags on a protected object. - */ - spinlock_acquire(d->public_descriptor->collection_lock, 'L'); - if (d->public_descriptor->stolen_objects.size != 0) - stm_normalize_stolen_objects(d); - - if (R->h_tid & GCFLAG_PUBLIC) - { - /* Make and return a new (young) private copy of the public R. - Add R into the list 'public_with_young_copy', unless W is - actually an old object, in which case we need to record W. - */ - if (R->h_tid & GCFLAG_MOVED) - { - /* Bah, the object turned into this kind of stub, possibly - while we were waiting for the collection_lock, because it - was stolen by someone else. Use R->h_revision instead. 
*/ - assert(IS_POINTER(R->h_revision)); - R = (gcptr)R->h_revision; - assert(R->h_tid & GCFLAG_PUBLIC); - } - assert(R->h_tid & GCFLAG_OLD); - W = LocalizePublic(d, R); - assert(is_private(W)); - - if (W->h_tid & GCFLAG_OLD) { - /* XXX: probably unnecessary as it is done in allocate_next_section - already */ - gcptrlist_insert(&d->old_objects_to_trace, W); - } else - gcptrlist_insert(&d->public_with_young_copy, R); - } - else - { - /* Turn the protected copy in-place into a private copy. If it's - an old object that still has GCFLAG_WRITE_BARRIER, then we must - also record it in the list 'old_objects_to_trace'. */ - W = LocalizeProtected(d, R); - assert(is_private(W)); - record_write_barrier(W); - } - - spinlock_release(d->public_descriptor->collection_lock); - - dprintf(("write_barrier: %p -> %p -> %p\n", P, R, W)); - - return W; -} - -gcptr stm_get_private_from_protected(long index) -{ - struct tx_descriptor *d = thread_descriptor; - if (index < gcptrlist_size(&d->private_from_protected)) - return d->private_from_protected.items[index]; - return NULL; -} - -gcptr stm_get_read_obj(long index) -{ - struct tx_descriptor *d = thread_descriptor; - if (index < gcptrlist_size(&d->list_of_read_objects)) - return d->list_of_read_objects.items[index]; - return NULL; -} - -/************************************************************/ - -static revision_t GetGlobalCurTime(struct tx_descriptor *d) -{ - assert(!is_inevitable(d)); // must not be myself inevitable - return ACCESS_ONCE(global_cur_time) & ~1; -} - -static _Bool ValidateDuringTransaction(struct tx_descriptor *d, - _Bool during_commit) -{ - long i, size = d->list_of_read_objects.size; - gcptr *items = d->list_of_read_objects.items; - - for (i=0; ih_revision); - if (IS_POINTER(v)) /* "is a pointer", i.e. has a more recent revision */ - { - if (R->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - { - /* such an object R might be listed in list_of_read_objects - before it was turned from protected to private */ - if (((gcptr)v)->h_tid & GCFLAG_PUBLIC) - { - /* The backup was stolen, but maybe not modified - afterwards. Check it. */ - R = (gcptr)v; - goto retry; - } - else - { - /* The backup was not stolen, everything's fine */ - continue; - } - } - else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_MOVED)) - == (GCFLAG_PUBLIC | GCFLAG_MOVED)) - { - /* such an object is identical to the one it points to - (stolen protected young object with h_revision pointing - to the new copy) */ - R = (gcptr)v; - goto retry; - } - else - { - dprintf(("validation failed: " - "%p has a more recent revision\n", R)); - return 0; - } - } - if (v >= LOCKED) // locked - { - if (!during_commit) - { - assert(v != d->my_lock); // we don't hold any lock - /* spinloop until the other thread releases its lock */ - SpinLoop(SPLP_LOCKED_VALIDATE); - goto retry; - } - else - { - if (v != d->my_lock) // not locked by me: conflict - { - /* It's delicate here to do a spinloop rather than - just aborting. - - A case that can occur: two threads A and B are both - committing, thread A locked object a, thread B - locked object b, and then thread A tries to - validate the reads it did on object b and - vice-versa. In this case both threads cannot - commit, but if they both enter the SpinLoop() - here, then they will livelock. - - Another case: thread A might be blocked in this - spinloop, while thread B is blocked in the - SpinLoop(SPLP_LOCKED_COMMIT) below. - - For now we always abort. 
- */ - dprintf(("validation failed: " - "%p is locked by another thread\n", R)); - return 0; - } - } - } - } - return 1; -} - -static void ValidateNow(struct tx_descriptor *d) -{ - d->start_time = GetGlobalCurTime(d); // copy from the global time - dprintf(("et.c: ValidateNow: %ld\n", (long)d->start_time)); - - /* subtle: we have to normalize stolen objects, because doing so - might add a few extra objects in the list_of_read_objects */ - if (d->public_descriptor->stolen_objects.size != 0) - { - spinlock_acquire(d->public_descriptor->collection_lock, 'N'); - stm_normalize_stolen_objects(d); - spinlock_release(d->public_descriptor->collection_lock); - } - - if (!ValidateDuringTransaction(d, 0)) - AbortTransaction(ABRT_VALIDATE_INFLIGHT); -} - -/************************************************************/ - -void SpinLoop(int num) -{ - struct tx_descriptor *d = thread_descriptor; - assert(stm_active >= 1); - assert(num < SPINLOOP_REASONS); - d->num_spinloops[num]++; - smp_spinloop(); -} - -static void purge_private_objs_from_old_objects_to_trace() -{ - struct tx_descriptor *d = thread_descriptor; - int i, size = d->old_objects_to_trace.size; - gcptr *items = d->old_objects_to_trace.items; - - for(i = 0; i < size; i++) { - if (items[i] && items[i]->h_revision == stm_private_rev_num) { - /* private objects from the same aborting transaction */ - items[i] = NULL; - dprintf(("purge old private object %p\n", items[i])); - } - } -} - -void stm_abort_and_retry(void) -{ - AbortTransaction(ABRT_MANUAL); -} - -void AbortPrivateFromProtected(struct tx_descriptor *d); - -void AbortTransaction(int num) -{ - static const char *abort_names[] = ABORT_NAMES; - struct tx_descriptor *d = thread_descriptor; - unsigned long limit; - struct timespec now; - long long elapsed_time; - - /* acquire the lock, but don't double-acquire it if already committing */ - if (d->public_descriptor->collection_lock != 'C') - { - spinlock_acquire(d->public_descriptor->collection_lock, 'C'); - if (d->public_descriptor->stolen_objects.size != 0) - stm_normalize_stolen_objects(d); - assert(!stm_has_got_any_lock(d)); - } - else - { - CancelLocks(d); - assert(!stm_has_got_any_lock(d)); - } - - assert(stm_active != 0); - assert(!is_inevitable(d)); - assert(num < ABORT_REASONS); - d->num_aborts[num]++; - - /* compute the elapsed time */ - if (d->start_real_time.tv_nsec != -1 && - clock_gettime(CLOCK_MONOTONIC, &now) >= 0) { - elapsed_time = now.tv_sec - d->start_real_time.tv_sec; - elapsed_time *= 1000000000; - elapsed_time += now.tv_nsec - d->start_real_time.tv_nsec; - if (elapsed_time < 1) - elapsed_time = 1; - } - else { - elapsed_time = 1; - } - - if (elapsed_time >= d->longest_abort_info_time) - { - /* decode the 'abortinfo' and produce a human-readable summary in - the string 'longest_abort_info' */ - size_t size = stm_decode_abort_info(d, elapsed_time, num, NULL); - free(d->longest_abort_info); - d->longest_abort_info = malloc(size); - if (d->longest_abort_info == NULL) - d->longest_abort_info_time = 0; /* out of memory! */ - else - { - if (stm_decode_abort_info(d, elapsed_time, num, - (struct tx_abort_info *)d->longest_abort_info) != size) - stm_fatalerror("during stm abort: object mutated unexpectedly\n"); - - d->longest_abort_info_time = elapsed_time; - } - } - - /* upon abort, set the reads size limit to 94% of how much was read - so far. This should ensure that, assuming the retry does the same - thing, it will commit just before it reaches the conflicting point. 
- Note that we should never *increase* the read length limit here. */ - limit = d->count_reads; - if (limit > d->reads_size_limit_nonatomic) { /* can occur if atomic */ - limit = d->reads_size_limit_nonatomic; - } - if (limit > 0) { - limit -= (limit >> 4); - d->reads_size_limit_nonatomic = limit; - } - - AbortPrivateFromProtected(d); - gcptrlist_clear(&d->list_of_read_objects); - g2l_clear(&d->public_to_private); - - /* 'old_thread_local_obj' contains the old value from stm_thread_local_obj, - but only when the transaction can be aborted; when it is inevitable - old_thread_local_obj will be reset to NULL. */ - assert(d->thread_local_obj_ref = &stm_thread_local_obj); - stm_thread_local_obj = d->old_thread_local_obj; - d->old_thread_local_obj = NULL; - - /* remove old private objects from old_objects_to_trace - because they never have to be traced (also because - weakrefs are kept alive even when their target is not - and stm_move_young_weakrefs doesn't handle that). */ - purge_private_objs_from_old_objects_to_trace(); - - // notifies the CPU that we're potentially in a spin loop - SpinLoop(SPLP_ABORT); - - /* make the transaction no longer active */ - stm_active = 0; - d->atomic = 0; - - /* release the lock */ - spinlock_release(d->public_descriptor->collection_lock); - - /* clear memory registered by stm_clear_on_abort */ - if (d->mem_clear_on_abort) - memset(d->mem_clear_on_abort, 0, d->mem_bytes_to_clear_on_abort); - - /* invoke the callbacks registered by stm_call_on_abort */ - stm_invoke_callbacks_on_abort(d); - stm_clear_callbacks_on_abort(d); - - /* XXX */ - fprintf(stderr, "[%lx] abort %s\n", - (long)d->public_descriptor_index, abort_names[num]); - dprintf(("\n" - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" - "!!!!!!!!!!!!!!!!!!!!! [%lx] abort %s\n" - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" - "\n", (long)d->public_descriptor_index, abort_names[num])); - if (num != ABRT_MANUAL && d->max_aborts >= 0 && !d->max_aborts--) - stm_fatalerror("unexpected abort!\n"); - - // jump back to the setjmp_buf (this call does not return) - stm_stop_sharedlock(); - if (d->longjmp_callback != NULL) - { - stm_begin_transaction(d->setjmp_buf, d->longjmp_callback); - d->longjmp_callback(d->setjmp_buf); - } - else - longjmp(*(jmp_buf *)d->setjmp_buf, 1); - - stm_fatalerror("longjmp() call should not return"); -} - -void AbortTransactionAfterCollect(struct tx_descriptor *d, int reason) -{ - if (*d->active_ref >= 0) - { - dprintf(("abort %d after collect!\n", reason)); - assert(*d->active_ref == 1); /* not 2, which means inevitable */ - *d->active_ref = -reason; - } - assert(*d->active_ref < 0); -} - -void AbortNowIfDelayed(void) -{ - struct tx_descriptor *d = thread_descriptor; - if (stm_active < 0) - { - int reason = -stm_active; - stm_active = 1; - AbortTransaction(reason); - } -} - -/************************************************************/ - -static void update_reads_size_limit(struct tx_descriptor *d) -{ - /* 'reads_size_limit' is set to ULONG_MAX if we are atomic; else - we copy the value from reads_size_limit_nonatomic. */ - d->reads_size_limit = d->atomic ? 
ULONG_MAX : d->reads_size_limit_nonatomic; -} - -long stm_atomic(long delta) -{ - struct tx_descriptor *d = thread_descriptor; - if (delta) // no atomic-checks - dprintf(("stm_atomic(%lu)\n", delta)); - d->atomic += delta; - assert(d->atomic >= 0); - update_reads_size_limit(d); - return d->atomic; -} - -static void init_transaction(struct tx_descriptor *d, int already_locked) -{ - assert(d->atomic == 0); - assert(*d->active_ref == 0); - if (!already_locked) - stm_start_sharedlock(); - assert(*d->active_ref == 0); - - if (clock_gettime(CLOCK_MONOTONIC, &d->start_real_time) < 0) { - d->start_real_time.tv_nsec = -1; - } - assert(d->list_of_read_objects.size == 0); - assert(d->private_from_protected.size == 0); - assert(d->num_private_from_protected_known_old == 0); - assert(d->num_read_objects_known_old == 0); - assert(!g2l_any_entry(&d->public_to_private)); - assert(d->old_thread_local_obj == NULL); - - d->count_reads = 1; - fxcache_clear(&d->recent_reads_cache); - gcptrlist_clear(&d->abortinfo); -} - -void stm_begin_transaction(void *buf, void (*longjmp_callback)(void *)) -{ - struct tx_descriptor *d = thread_descriptor; - init_transaction(d, 0); - stm_active = 1; - d->setjmp_buf = buf; - d->longjmp_callback = longjmp_callback; - d->old_thread_local_obj = stm_thread_local_obj; - d->start_time = GetGlobalCurTime(d); - update_reads_size_limit(d); -} - -static void AcquireLocks(struct tx_descriptor *d) -{ - revision_t my_lock = d->my_lock; - wlog_t *item; - - dprintf(("acquire_locks\n")); - assert(!stm_has_got_any_lock(d)); - assert(d->public_descriptor->stolen_objects.size == 0); - - if (!g2l_any_entry(&d->public_to_private)) - return; - - G2L_LOOP_FORWARD(d->public_to_private, item) - { - gcptr R = item->addr; - revision_t v; - retry: - assert(R->h_tid & GCFLAG_PUBLIC); - assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - v = ACCESS_ONCE(R->h_revision); - if (IS_POINTER(v)) /* "has a more recent revision" */ - { - assert(v != 0); - AbortTransaction(ABRT_COMMIT); - } - if (v >= LOCKED) // already locked by someone else - { - // we can always spinloop here: deadlocks should be impossible, - // because G2L_LOOP_FORWARD should ensure a consistent ordering - // of the R's. - assert(v != my_lock); - SpinLoop(SPLP_LOCKED_COMMIT); - goto retry; - } - if (!bool_cas(&R->h_revision, v, my_lock)) - goto retry; - - gcptr L = item->val; - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED ? 
- L->h_revision == (revision_t)R : - L->h_revision == stm_private_rev_num); - assert(v != stm_private_rev_num); - assert(v & 1); - L->h_revision = v; /* store temporarily this value here */ - - } G2L_LOOP_END; -} - -static void CancelLocks(struct tx_descriptor *d) -{ - wlog_t *item; - dprintf(("cancel_locks\n")); - if (!g2l_any_entry(&d->public_to_private)) - return; - - G2L_LOOP_FORWARD(d->public_to_private, item) - { - gcptr R = item->addr; - gcptr L = item->val; - if (L == NULL) - continue; - - revision_t expected, v = L->h_revision; - - if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - expected = (revision_t)R; - else - expected = stm_private_rev_num; - - if (v == expected) - { - assert(R->h_revision != d->my_lock); - break; /* done */ - } - - L->h_revision = expected; - -#ifdef DUMP_EXTRA - dprintf(("%p->h_revision = %p (CancelLocks)\n", R, (gcptr)v)); -#endif - assert(R->h_revision == d->my_lock); - ACCESS_ONCE(R->h_revision) = v; - - } G2L_LOOP_END; -} - -_Bool stm_has_got_any_lock(struct tx_descriptor *d) -{ - wlog_t *item; - int found_locked, found_unlocked; - - if (!g2l_any_entry(&d->public_to_private)) - return 0; - - found_locked = 0; - found_unlocked = 0; - - G2L_LOOP_FORWARD(d->public_to_private, item) - { - gcptr R = item->addr; - gcptr L = item->val; - if (L == NULL) - continue; - - revision_t expected, v = L->h_revision; - - if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - expected = (revision_t)R; - else - expected = *d->private_revision_ref; - - if (v == expected) - { - assert(R->h_revision != d->my_lock); - found_unlocked = 1; - continue; - } - - found_locked = 1; - assert(found_unlocked == 0); /* an unlocked followed by a locked: no */ - - } G2L_LOOP_END; - - return found_locked; -} - -static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER; - -static void UpdateChainHeads(struct tx_descriptor *d, revision_t cur_time, - revision_t localrev) -{ - wlog_t *item; - revision_t new_revision = cur_time + 1; // make an odd number - assert(new_revision & 1); - - if (!g2l_any_entry(&d->public_to_private)) - return; - - G2L_LOOP_FORWARD(d->public_to_private, item) - { - gcptr L = item->val; - assert(!(L->h_tid & GCFLAG_VISITED)); - assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - assert(!(L->h_tid & GCFLAG_MOVED)); - assert(L->h_revision != localrev); /* modified by AcquireLocks() */ - -#ifdef DUMP_EXTRA - dprintf(("%p->h_revision = %p (UpdateChainHeads)\n", - L, (gcptr)new_revision)); -#endif - L->h_revision = new_revision; - - gcptr stub = stm_stub_malloc(d->public_descriptor, 0); - stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC - | GCFLAG_STUB - | GCFLAG_SMALLSTUB - | GCFLAG_OLD; - dprintf(("et.c: stm_stub_malloc -> %p\n", stub)); - stub->h_revision = ((revision_t)L) | 2; - - assert(!(L->h_tid & GCFLAG_HAS_ID)); - if (L->h_original) { - stub->h_original = L->h_original; - } - else if (L->h_tid & GCFLAG_OLD) { - stub->h_original = (revision_t)L; - } - else { - /* There shouldn't be a public, young object without - a h_original. 
They only come from stealing which - always sets h_original */ - assert(0); - /* L->h_original = (revision_t)stub; */ - /* if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { */ - /* ((gcptr)L->h_revision)->h_original = (revision_t)stub; */ - /* } */ - } - - item->val = stub; - - } G2L_LOOP_END; - - smp_wmb(); /* a memory barrier: make sure the new L->h_revisions are visible - from other threads before we change the R->h_revisions */ - - G2L_LOOP_FORWARD(d->public_to_private, item) - { - gcptr R = item->addr; - revision_t v = (revision_t)item->val; - - assert(R->h_tid & GCFLAG_PUBLIC); - assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - assert(!(R->h_tid & GCFLAG_MOVED)); - assert(R->h_revision != localrev); - -#ifdef DUMP_EXTRA - dprintf(("%p->h_revision = %p (stub to %p)\n", - R, (gcptr)v, (gcptr)item->val->h_revision)); -#endif - ACCESS_ONCE(R->h_revision) = v; - - if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) - { - /* cannot possibly get here more than once for a given value of R */ - pthread_mutex_lock(&mutex_prebuilt_gcroots); - gcptrlist_insert(&stm_prebuilt_gcroots, R); - pthread_mutex_unlock(&mutex_prebuilt_gcroots); - } - - } G2L_LOOP_END; - - g2l_clear(&d->public_to_private); -} - -void CommitPrivateFromProtected(struct tx_descriptor *d, revision_t cur_time) -{ - long i, size = d->private_from_protected.size; - gcptr *items = d->private_from_protected.items; - revision_t new_revision = cur_time + 1; // make an odd number - assert(new_revision & 1); - assert(d->public_descriptor->stolen_objects.size == 0); - - for (i = 0; i < size; i++) - { - gcptr P = items[i]; - assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; - - if (!IS_POINTER(P->h_revision)) - { - /* This case occurs when a GCFLAG_PRIVATE_FROM_PROTECTED object - is stolen: it ends up as a value in 'public_to_private'. - Its h_revision is then mangled by AcquireLocks(). */ - assert(P->h_revision != stm_private_rev_num); - continue; - } - - gcptr B = (gcptr)P->h_revision; - P->h_revision = new_revision; - - if (B->h_tid & GCFLAG_PUBLIC) - { - assert(!(P->h_tid & GCFLAG_HAS_ID)); - - /* B was stolen */ - while (1) - { - revision_t v = ACCESS_ONCE(B->h_revision); - if (IS_POINTER(v)) /* "was modified" */ - AbortTransaction(ABRT_STOLEN_MODIFIED); - - if (bool_cas(&B->h_revision, v, (revision_t)P)) - break; - } - } - else - { - stmgcpage_free(B); - dprintf(("commit: free backup at %p\n", B)); - } - }; - gcptrlist_clear(&d->private_from_protected); - d->num_private_from_protected_known_old = 0; - d->num_read_objects_known_old = 0; - dprintf(("private_from_protected: clear (commit)\n")); -} - -void AbortPrivateFromProtected(struct tx_descriptor *d) -{ - long i, size = d->private_from_protected.size; - gcptr *items = d->private_from_protected.items; - - for (i = 0; i < size; i++) - { - gcptr P = items[i]; - assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - assert(IS_POINTER(P->h_revision)); - P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; - - gcptr B = (gcptr)P->h_revision; - assert(B->h_tid & GCFLAG_OLD); - - if (B->h_tid & GCFLAG_PUBLIC) - { - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - P->h_tid |= GCFLAG_PUBLIC; - assert(!(P->h_tid & GCFLAG_HAS_ID)); - if (!(P->h_tid & GCFLAG_OLD)) P->h_tid |= GCFLAG_MOVED; - /* P becomes a public outdated object. It may create an - exception documented in doc-objects.txt: a public but young - object. It's still fine because it should only be seen by - other threads during stealing, and as it's outdated, - stealing will follow its h_revision (to B). 
- */ - } - else - { - /* copy the backup copy B back over the now-protected object P, - and then free B, which will not be used any more. */ - size_t size = stmgc_size(B); - assert(B->h_tid & GCFLAG_BACKUP_COPY); - /* if h_original was 0, it must stay that way and not point - to itself. (B->h_original may point to P) */ - revision_t h_original = P->h_original; - memcpy(((char *)P) + offsetof(struct stm_object_s, h_revision), - ((char *)B) + offsetof(struct stm_object_s, h_revision), - size - offsetof(struct stm_object_s, h_revision)); - P->h_original = h_original; - assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); - stmgcpage_free(B); - dprintf(("abort: free backup at %p\n", B)); - } - }; - gcptrlist_clear(&d->private_from_protected); - d->num_private_from_protected_known_old = 0; - d->num_read_objects_known_old = 0; - dprintf(("private_from_protected: clear (abort)\n")); -} - -void CommitTransaction(int stay_inevitable) -{ /* must save roots around this call */ - revision_t cur_time; - struct tx_descriptor *d = thread_descriptor; - assert(*d->active_ref >= 1); - assert(d->atomic == 0); - dprintf(("CommitTransaction(%d): %p\n", stay_inevitable, d)); - - spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ - if (d->public_descriptor->stolen_objects.size != 0) - stm_normalize_stolen_objects(d); - AcquireLocks(d); - - if (is_inevitable(d)) - { - // no-one else can have changed global_cur_time if I'm inevitable - cur_time = d->start_time; - if (!bool_cas(&global_cur_time, cur_time + 1, cur_time + 2)) - { - stm_fatalerror("global_cur_time modified even though we are inev\n"); - } - - if (!stay_inevitable) { - /* we simply don't release the mutex. */ - inev_mutex_release(); - } - } - else - { - while (1) - { - cur_time = ACCESS_ONCE(global_cur_time); - if (cur_time & 1) - { // there is another inevitable transaction - CancelLocks(d); - spinlock_release(d->public_descriptor->collection_lock); - inev_mutex_acquire(d); // wait until released - inev_mutex_release(); - spinlock_acquire(d->public_descriptor->collection_lock, 'C'); - if (d->public_descriptor->stolen_objects.size != 0) - stm_normalize_stolen_objects(d); - - AcquireLocks(d); - continue; - } - if (bool_cas(&global_cur_time, cur_time, cur_time + 2)) - break; - } - // validate (but skip validation if nobody else committed) - if (cur_time != d->start_time) - if (!ValidateDuringTransaction(d, 1)) - AbortTransaction(ABRT_VALIDATE_COMMIT); - } - - CommitPrivateFromProtected(d, cur_time); - - /* we cannot abort any more from here */ - d->setjmp_buf = NULL; - d->old_thread_local_obj = NULL; - gcptrlist_clear(&d->list_of_read_objects); - - dprintf(("\n" - "*************************************\n" - "************************************** committed %ld\n" - "*************************************\n", - (long)cur_time)); - - revision_t localrev = stm_private_rev_num; - //UpdateProtectedChainHeads(d, cur_time, localrev); - //smp_wmb(); - - revision_t newrev = -(cur_time + 1); - assert(newrev & 1); - ACCESS_ONCE(stm_private_rev_num) = newrev; - dprintf(("%p: stm_local_revision = %ld\n", d, (long)newrev)); - assert(d->private_revision_ref == &stm_private_rev_num); - assert(d->read_barrier_cache_ref == &stm_read_barrier_cache); - - UpdateChainHeads(d, cur_time, localrev); - - spinlock_release(d->public_descriptor->collection_lock); - d->num_commits++; - stm_active = 0; - if (!stay_inevitable) - stm_stop_sharedlock(); - - /* clear the list of callbacks that would have been called - on abort */ - stm_clear_callbacks_on_abort(d); -} - 
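The CommitTransaction() path listed above reserves its commit timestamp by atomically bumping a global clock: an odd global_cur_time means an inevitable transaction is in flight, so a normal transaction backs off (cancelling its locks and waiting on the inevitable mutex) and retries, while a successful compare-and-swap from an even value t to t+2 claims t, after which the transaction revalidates its reads if t differs from its start_time. Below is a minimal, self-contained C11 sketch of just that clock-reservation step; the names global_clock and reserve_commit_time are hypothetical stand-ins for illustration, not identifiers from the stmgc sources, and sched_yield() stands in for the real cancel-locks-and-wait-on-mutex behaviour.

    #include <stdatomic.h>
    #include <sched.h>

    static atomic_long global_clock = 2;   /* even value: no inevitable transaction running */

    /* Spin until no inevitable transaction holds the clock, then claim the
       next even timestamp.  The caller is expected to revalidate its read
       set if the returned value differs from the transaction's start time. */
    long reserve_commit_time(void)
    {
        for (;;) {
            long t = atomic_load(&global_clock);
            if (t & 1) {             /* odd: an inevitable transaction is in flight */
                sched_yield();       /* sketch only; the real code re-acquires locks after waiting */
                continue;
            }
            long expected = t;
            if (atomic_compare_exchange_strong(&global_clock, &expected, t + 2))
                return t;            /* commit timestamp reserved */
        }
    }

In the listing above this call site would sit after AcquireLocks(), mirroring the order in CommitTransaction(): locks first, then the clock CAS, then validation against the reserved time.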
-/************************************************************/ - -static void make_inevitable(struct tx_descriptor *d) -{ - d->setjmp_buf = NULL; - d->old_thread_local_obj = NULL; - *d->active_ref = 2; - d->reads_size_limit_nonatomic = 0; - update_reads_size_limit(d); - dprintf(("make_inevitable(%p)\n", d)); -} - -static revision_t acquire_inev_mutex_and_mark_global_cur_time( - struct tx_descriptor *d) -{ /* must save roots around this call */ - revision_t cur_time; - - inev_mutex_acquire(d); - while (1) - { - cur_time = ACCESS_ONCE(global_cur_time); - assert((cur_time & 1) == 0); - if (bool_cas(&global_cur_time, cur_time, cur_time + 1)) - break; - /* else try again */ - } - return cur_time; -} - -void BecomeInevitable(const char *why) -{ /* must save roots around this call */ - revision_t cur_time; - struct tx_descriptor *d = thread_descriptor; - if (d == NULL || stm_active != 1) - return; /* I am already inevitable, or not in a transaction at all - (XXX statically we should know when we're outside - a transaction) */ - - /* XXX */ - /* fprintf(stderr, "[%lx] inevitable: %s\n", */ - /* (long)d->public_descriptor_index, why); */ - dprintf(("[%lx] inevitable: %s\n", - (long)d->public_descriptor_index, why)); - - cur_time = acquire_inev_mutex_and_mark_global_cur_time(d); - if (d->start_time != cur_time) - { - d->start_time = cur_time; - if (!ValidateDuringTransaction(d, 0)) - { - global_cur_time = cur_time; // revert from (cur_time + 1) - inev_mutex_release(); - AbortTransaction(ABRT_VALIDATE_INEV); - } - } - make_inevitable(d); /* cannot abort any more */ -} - -void BeginInevitableTransaction(int already_inevitable) -{ /* must save roots around this call */ - struct tx_descriptor *d = thread_descriptor; - revision_t cur_time; - - init_transaction(d, already_inevitable); - - if (already_inevitable) { - cur_time = ACCESS_ONCE(global_cur_time); - assert((cur_time & 1) == 0); - if (!bool_cas(&global_cur_time, cur_time, cur_time + 1)) { - stm_fatalerror("there was a commit between a partial inevitable " - "commit and the continuation of the transaction\n"); - } - } - else { - cur_time = acquire_inev_mutex_and_mark_global_cur_time(d); - } - - d->start_time = cur_time; - make_inevitable(d); -} - -/************************************************************/ - -#if 0 -static _Bool _PtrEq_Globals(gcptr G1, gcptr G2) -{ - /* This is a mess, because G1 and G2 can be different pointers to "the From noreply at buildbot.pypy.org Wed Mar 5 16:52:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Mar 2014 16:52:01 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: first small fixes Message-ID: <20140305155201.C4B3D1D2696@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69726:a89adf77a9e4 Date: 2014-03-05 14:05 +0100 http://bitbucket.org/pypy/pypy/changeset/a89adf77a9e4/ Log: first small fixes diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -4,7 +4,7 @@ from rpython.memory.gctransform.framework import ( TYPE_ID, BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) from rpython.memory.gctypelayout import WEAKREF, WEAKREFPTR -from rpython.rtyper import rmodel +from rpython.rtyper import rmodel, llannotation class StmFrameworkGCTransformer(BaseFrameworkGCTransformer): @@ -14,7 +14,7 @@ s_gc, s_typeid16) gc = self.gcdata.gc # - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = 
llannotation.SomePtr(llmemory.GCREF) self.malloc_weakref_ptr = self._getfn( GCClass.malloc_weakref.im_func, @@ -25,7 +25,7 @@ return gc.get_size(obj) pypy_stmcb_size.c_name = "pypy_stmcb_size" self.autoregister_ptrs.append( - getfn(pypy_stmcb_size, [annmodel.SomeAddress()], + getfn(pypy_stmcb_size, [llannotation.SomeAddress()], annmodel.SomeInteger())) # def invokecallback(root, visit_fn): @@ -34,8 +34,8 @@ gc.trace(obj, invokecallback, visit_fn) pypy_stmcb_trace.c_name = "pypy_stmcb_trace" self.autoregister_ptrs.append( - getfn(pypy_stmcb_trace, [annmodel.SomeAddress(), - annmodel.SomePtr(GCClass.VISIT_FPTR)], + getfn(pypy_stmcb_trace, [llannotation.SomeAddress(), + llannotation.SomePtr(GCClass.VISIT_FPTR)], annmodel.s_None)) def build_root_walker(self): diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -331,6 +331,7 @@ from rpython.annotator import model as annmodel from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator + from rpython.rtyper.llannotation import lltype_to_annotation entrypoint = self.entrypoint # def entrypoint_wrapper(argc, argv): @@ -343,7 +344,7 @@ # mix = MixLevelHelperAnnotator(self.translator.rtyper) args_s = [annmodel.SomeInteger(), - annmodel.lltype_to_annotation(rffi.CCHARPP)] + lltype_to_annotation(rffi.CCHARPP)] s_result = annmodel.SomeInteger() graph = mix.getgraph(entrypoint_wrapper, args_s, s_result) mix.finish() diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py --- a/rpython/translator/stm/jitdriver.py +++ b/rpython/translator/stm/jitdriver.py @@ -2,7 +2,8 @@ from rpython.flowspace.model import checkgraph, copygraph from rpython.flowspace.model import Block, Link, SpaceOperation, Constant from rpython.translator.unsimplify import split_block, varoftype -from rpython.annotator.model import lltype_to_annotation, s_Int +from rpython.annotator.model import s_Int +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.annlowlevel import (MixLevelHelperAnnotator, cast_base_ptr_to_instance) from rpython.rlib import rstm diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -605,12 +605,12 @@ external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, _callable=lambda: None, random_effects_on_gcobjs=True, - threadsafe=True) # GIL is released + releasegil=True) external_any_gcobj = rffi.llexternal('external_any_gcobj', [], lltype.Void, _callable=lambda: None, random_effects_on_gcobjs=True, - threadsafe=False) # GIL is not released + releasegil=False) external_safest = rffi.llexternal('external_safest', [], lltype.Void, _callable=lambda: None, random_effects_on_gcobjs=False, - threadsafe=False) # GIL is not released + releasegil=False) From noreply at buildbot.pypy.org Wed Mar 5 17:09:49 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Mar 2014 17:09:49 +0100 (CET) Subject: [pypy-commit] stmgc default: a more randomness in allocation sizes Message-ID: <20140305160949.D29071C0290@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r962:b99ef80df576 Date: 2014-03-05 15:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/b99ef80df576/ Log: a more randomness in allocation sizes diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- 
a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -208,15 +208,15 @@ case 3: // allocate fresh 'p' push_roots(); size_t sizes[4] = {sizeof(struct node_s), - sizeof(struct node_s) + 48, + sizeof(struct node_s) + (get_rand(100000) & ~15), sizeof(struct node_s) + 4096, sizeof(struct node_s) + 4096*70}; size_t size = sizes[get_rand(4)]; p = stm_allocate(size); ((nodeptr_t)p)->sig = SIGNATURE; ((nodeptr_t)p)->my_size = size; - ((nodeptr_t)p)->my_id = -1; - ((nodeptr_t)p)->my_hash = -1; + ((nodeptr_t)p)->my_id = 0; + ((nodeptr_t)p)->my_hash = 0; pop_roots(); /* reload_roots not necessary, all are old after start_transaction */ break; @@ -239,7 +239,7 @@ case 8: // id checking if (p) { nodeptr_t n = (nodeptr_t)p; - if (n->my_id == -1) { + if (n->my_id == 0) { write_barrier(p); n->my_id = stm_id(p); } @@ -252,7 +252,7 @@ case 9: if (p) { nodeptr_t n = (nodeptr_t)p; - if (n->my_hash == -1) { + if (n->my_hash == 0) { write_barrier(p); n->my_hash = stm_identityhash(p); } @@ -367,8 +367,8 @@ struct node_s prebuilt_template = { .sig = SIGNATURE, .my_size = sizeof(struct node_s), - .my_id = -1, - .my_hash = -1, + .my_id = 0, + .my_hash = 0, .next = NULL }; @@ -379,7 +379,7 @@ prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)new_templ); if (i % 2 == 0) { - int hash = i; + int hash = i + 5; stm_set_prebuilt_identityhash(prebuilt_roots[i], hash); ((nodeptr_t)prebuilt_roots[i])->my_hash = hash; From noreply at buildbot.pypy.org Wed Mar 5 17:09:51 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Mar 2014 17:09:51 +0100 (CET) Subject: [pypy-commit] stmgc default: add inevitable transactions to the mix Message-ID: <20140305160951.161C51C0290@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r963:b053337ca45e Date: 2014-03-05 15:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/b053337ca45e/ Log: add inevitable transactions to the mix diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -273,12 +273,17 @@ int k; _r = get_random_root(); - k = get_rand(11); + k = get_rand(12); - if (k < 10) + if (k < 10) { p = simple_events(p, _r); - else if (get_rand(20) == 1) { + } else if (get_rand(20) == 1) { return (objptr_t)-1; // break current + } else if (get_rand(20) == 1) { + push_roots(); + stm_become_inevitable("please"); + pop_roots(); + return NULL; } return p; } From noreply at buildbot.pypy.org Wed Mar 5 17:09:52 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Mar 2014 17:09:52 +0100 (CET) Subject: [pypy-commit] stmgc default: we should free those lists too in abort_data_structures_from_segment_num Message-ID: <20140305160952.3C8841C0290@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r964:f14b4b47b93a Date: 2014-03-05 17:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/f14b4b47b93a/ Log: we should free those lists too in abort_data_structures_from_segment_num diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -473,6 +473,10 @@ stm_thread_local_t *tl = pseg->pub.running_thread; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; + + /* reset these lists to NULL too on abort */ + LIST_FREE(pseg->objects_pointing_to_nursery); + LIST_FREE(pseg->large_overflow_objects); } static void abort_with_mutex(void) From noreply at buildbot.pypy.org Wed Mar 5 17:52:04 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 5 Mar 2014 17:52:04 +0100 (CET) Subject: 
[pypy-commit] pypy numpypy-nditer: start to pass tests Message-ID: <20140305165204.E5E671C31E9@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r69727:ecda1fad7553 Date: 2014-03-05 18:51 +0200 http://bitbucket.org/pypy/pypy/changeset/ecda1fad7553/ Log: start to pass tests diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -198,7 +198,7 @@ backstrides = imp.backstrides r = calculate_broadcast_strides(strides, backstrides, imp.shape, shape, backward) - return ArrayIter(imp, shape, r[0], r[1]) + return ArrayIter(imp, imp.get_size(), shape, r[0], r[1]) def is_backward(imp, order): if order == 'K' or (order == 'C' and imp.order == 'C'): @@ -268,7 +268,8 @@ parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, len(self.seq), parse_op_flag) - self.set_op_axes(space, w_op_axes) + if not space.is_none(w_op_axes): + self.set_op_axes(space, w_op_axes) self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) if self.tracked_index != "": diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -42,6 +42,7 @@ """ from rpython.rlib import jit from pypy.module.micronumpy import support +from pypy.module.micronumpy.strides import calc_strides from pypy.module.micronumpy.base import W_NDimArray @@ -148,7 +149,7 @@ dtype = arr.implementation.dtype if backward: self.slicesize = shape[0] - self.gap = [support.product(shape[1:]) * dtype.get_size()] + self.gap = [support.product(shape[1:]) * dtype.elsize] self.strides = strides[1:] self.backstrides = backstrides[1:] self.shape = shape[1:] @@ -157,9 +158,8 @@ self.shape.reverse() self.shapelen = len(self.shape) else: - shape = [support.product(shape)] - self.strides, self.backstrides = support.calc_strides(shape, - dtype, order) + self.shape = [support.product(shape)] + self.strides, self.backstrides = calc_strides(shape, dtype, order) self.slicesize = support.product(shape) self.shapelen = 0 self.gap = self.strides @@ -185,9 +185,10 @@ self.offset = offset def getslice(self): - from pypy.module.micronumpy.arrayimpl.concrete import SliceArray - return SliceArray(self.offset, self.gap, self.backstrides, + from pypy.module.micronumpy.concrete import SliceArray + retVal = SliceArray(self.offset, self.gap, self.backstrides, [self.slicesize], self.arr.implementation, self.arr, self.dtype) + return retVal def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -11,7 +11,7 @@ BaseNumpyAppTest.setup_class.im_func(cls) def test_basic(self): - from numpypy import arange, nditer + from numpy import arange, nditer a = arange(6).reshape(2,3) r = [] for x in nditer(a): @@ -24,7 +24,7 @@ assert r == [0, 1, 2, 3, 4, 5] def test_order(self): - from numpypy import arange, nditer + from numpy import arange, nditer a = arange(6).reshape(2,3) r = [] for x in nditer(a, order='C'): @@ -36,14 +36,14 @@ assert r == [0, 3, 1, 4, 2, 5] def test_readwrite(self): - from numpypy import arange, nditer + from numpy import arange, nditer a = arange(6).reshape(2,3) for x in nditer(a, op_flags=['readwrite']): x[...] 
= 2 * x assert (a == [[0, 2, 4], [6, 8, 10]]).all() def test_external_loop(self): - from numpypy import arange, nditer, array + from numpy import arange, nditer, array a = arange(24).reshape(2, 3, 4) r = [] n = 0 @@ -67,7 +67,7 @@ assert e def test_index(self): - from numpypy import arange, nditer, zeros + from numpy import arange, nditer a = arange(6).reshape(2,3) r = [] @@ -92,7 +92,7 @@ @py.test.mark.xfail(reason="Fortran order not implemented") def test_iters_with_different_order(self): - from numpypy import nditer, array + from numpy import nditer, array a = array([[1, 2], [3, 4]], order="C") b = array([[1, 2], [3, 4]], order="F") @@ -102,7 +102,7 @@ assert list(it) == zip(range(1, 5), range(1, 5)) def test_interface(self): - from numpypy import arange, nditer, zeros + from numpy import arange, nditer, zeros a = arange(6).reshape(2,3) r = [] it = nditer(a, flags=['f_index']) @@ -120,7 +120,7 @@ assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") def test_buffered(self): - from numpypy import arange, nditer, array + from numpy import arange, nditer, array a = arange(6).reshape(2,3) r = [] for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): @@ -128,7 +128,7 @@ assert (array(r) == [0, 3, 1, 4, 2, 5]).all() def test_op_dtype(self): - from numpypy import arange, nditer, sqrt, array + from numpy import arange, nditer, sqrt, array a = arange(6).reshape(2,3) - 3 exc = raises(nditer, a, op_dtypes=['complex']) assert str(exc.value).startswith("Iterator operand required copying or buffering") @@ -146,26 +146,26 @@ 1+0j, 1.41421356237+0j]).sum()) < 1e-5 def test_casting(self): - from numpypy import arange, nditer + from numpy import arange, nditer a = arange(6.) - exc = raises(nditer, a, flags=['buffered'], op_dtypes=['float32']) + exc = raises(ValueError, nditer, a, flags=['buffered'], op_dtypes=['float32']) assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") r = [] for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], casting='same_kind'): r.append(x) assert r == [0., 1., 2., 3., 4., 5.] 
- exc = raises(nditer, a, flags=['buffered'], + exc = raises(ValueError, nditer, a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind') assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") r = [] b = arange(6) - exc = raises(nditer, b, flags=['buffered'], op_dtypes=['float64'], + exc = raises(ValueError, nditer, b, flags=['buffered'], op_dtypes=['float64'], op_flags=['readwrite'], casting='same_kind') assert str(exc.value).startswith("Iterator requested dtype could not be cast") def test_broadcast(self): - from numpypy import arange, nditer + from numpy import arange, nditer a = arange(3) b = arange(6).reshape(2,3) r = [] @@ -173,11 +173,11 @@ r.append((x, y)) assert r == [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)] a = arange(2) - exc = raises(nditer, [a, b]) + exc = raises(ValueError, nditer, [a, b]) assert str(exc.value).find('shapes (2) (2,3)') > 0 def test_outarg(self): - from numpypy import nditer, zeros, arange + from numpy import nditer, zeros, arange def square1(a): it = nditer([a, None]) @@ -202,7 +202,7 @@ assert str(exc.value).startswith('non-broadcastable output') def test_outer_product(self): - from numpypy import nditer, arange + from numpy import nditer, arange a = arange(3) b = arange(8).reshape(2,4) it = nditer([a, b, None], flags=['external_loop'], @@ -214,7 +214,7 @@ assert (it.operands[2][i] == a[i]*b).all() def test_reduction(self): - from numpypy import nditer, arange, array + from numpy import nditer, arange, array a = arange(24).reshape(2, 3, 4) b = array(0) #reduction operands must be readwrite @@ -249,13 +249,13 @@ assert (it.operands[1] == a.sum(axis=2)).all() def test_get_dtypes(self): - from numpypy import array, dtype, nditer + from numpy import array, dtype, nditer x = array([1, 2]) y = array([1.0, 2.0]) assert nditer([x, y]).dtypes == (dtype("int64"), dtype("float64")) def test_multi_index(self): - import numpypy as np + import numpy as np a = np.arange(6).reshape(2, 3) From noreply at buildbot.pypy.org Wed Mar 5 18:15:42 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Wed, 5 Mar 2014 18:15:42 +0100 (CET) Subject: [pypy-commit] pypy default: Backed out changeset: 925eec5518eb, bdk fixed everything properly before Message-ID: <20140305171542.94E201C3347@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69728:75a2fb413619 Date: 2014-03-05 18:12 +0100 http://bitbucket.org/pypy/pypy/changeset/75a2fb413619/ Log: Backed out changeset: 925eec5518eb, bdk fixed everything properly before diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -214,10 +214,10 @@ i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i23 = int_ge(i18, 0) - guard_true(i23, descr=...) - i25 = int_lt(i18, i9) - guard_true(i25, descr=...) + i23 = int_lt(i18, 0) + guard_false(i23, descr=...) + i25 = int_ge(i18, i9) + guard_false(i25, descr=...) i27 = int_add_ovf(i7, i18) guard_no_overflow(descr=...) 
--TICK-- From noreply at buildbot.pypy.org Wed Mar 5 18:15:43 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Wed, 5 Mar 2014 18:15:43 +0100 (CET) Subject: [pypy-commit] pypy default: Backed out changeset: 10e1e307844f, bdk fixed everything before Message-ID: <20140305171543.E052F1C3347@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69729:983423db26e2 Date: 2014-03-05 18:13 +0100 http://bitbucket.org/pypy/pypy/changeset/983423db26e2/ Log: Backed out changeset: 10e1e307844f, bdk fixed everything before diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -176,14 +176,14 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" guard_not_invalidated? - i16 = int_lt(i11, i12) - guard_true(i16, descr=...) + i16 = int_ge(i11, i12) + guard_false(i16, descr=...) i20 = int_add(i11, 1) i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i25 = int_lt(i11, i9) - guard_true(i25, descr=...) + i25 = int_ge(i11, i9) + guard_false(i25, descr=...) i27 = int_add_ovf(i7, i11) guard_no_overflow(descr=...) --TICK-- From noreply at buildbot.pypy.org Wed Mar 5 19:36:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 19:36:26 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup, enable passing test Message-ID: <20140305183626.F1C4B1C35CC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69730:7b7d906a7882 Date: 2014-03-05 13:35 -0500 http://bitbucket.org/pypy/pypy/changeset/7b7d906a7882/ Log: cleanup, enable passing test diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -784,8 +784,6 @@ assert l == [1,2,3,4,5] def test_iadd_subclass(self): - #XXX - skip("Maybe there is something wrong in descroperation?") class Bar(object): def __radd__(self, other): return ('radd', self, other) @@ -1441,7 +1439,7 @@ l.__setslice__(0,3,l2) assert l == [0,1,2] - def test_getitem(self): + def test_getitem_range(self): l = range(5) raises(IndexError, "l[-6]") raises(IndexError, "l[5]") diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py --- a/pypy/objspace/std/test/test_rangeobject.py +++ b/pypy/objspace/std/test/test_rangeobject.py @@ -1,5 +1,6 @@ import py + class AppTestRangeListObject(object): spaceconfig = {"objspace.std.withrangelist": True} @@ -24,6 +25,14 @@ assert result == [1, 3, 5, 7] assert self.not_forced(r) + def test_getitem_simple(self): + r = range(4) + assert r[-1] == 3 + assert r[3] == 3 + assert r[-4] == 0 + raises(IndexError, r.__getitem__, -5) + raises(IndexError, r.__getitem__, 4) + def test_getitem_slice(self): result = [] r = range(1, 100, 2) @@ -71,6 +80,7 @@ r = range(10) r.sort(key=lambda x: -x) assert r == range(9, -1, -1) + def test_pop(self): # RangeListStrategy r = range(1, 10) @@ -109,14 +119,6 @@ assert not self.not_forced(r) assert r == [1, 2, 3, 4, 5, 6, 7] - def test_getitem_simple(self): - r = range(4) - assert r[-1] == 3 - assert r[3] == 3 - assert r[-4] == 0 - raises(IndexError, r.__getitem__, -5) - raises(IndexError, r.__getitem__, 4) - def test_reduce(self): it = iter(range(10)) assert it.next() == 0 From noreply at buildbot.pypy.org Wed Mar 5 20:45:13 2014 From: noreply at buildbot.pypy.org 
(pjenvey) Date: Wed, 5 Mar 2014 20:45:13 +0100 (CET) Subject: [pypy-commit] pypy default: backout 307818c61207 & 0e0d08198110, this is no longer needed for py3k Message-ID: <20140305194513.034AD1C0290@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69731:b58eb7873100 Date: 2014-03-05 11:38 -0800 http://bitbucket.org/pypy/pypy/changeset/b58eb7873100/ Log: backout 307818c61207 & 0e0d08198110, this is no longer needed for py3k diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr2), p25, 16, descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -542,7 +542,7 @@ def _string_to_w_long(space, w_longtype, w_source, string, base=10): try: - bigint = rbigint.fromstr2(string, base) + bigint = rbigint.fromstr(string, base) except ParseStringError as e: from pypy.objspace.std.intobject import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -254,28 +254,19 @@ @staticmethod @jit.elidable - def fromstr(s, base=0, ignore_l_suffix=False, fname='long'): - """As string_to_int(), but optionally ignores an optional 'l' or - 'L' suffix and returns an rbigint. - """ + def fromstr(s, base=0): + """As string_to_int(), but ignores an optional 'l' or 'L' suffix + and returns an rbigint.""" from rpython.rlib.rstring import NumberStringParser, \ strip_spaces s = literal = strip_spaces(s) - if (not ignore_l_suffix and (s.endswith('l') or s.endswith('L')) and - base < 22): + if (s.endswith('l') or s.endswith('L')) and base < 22: # in base 22 and above, 'L' is a valid digit! 
try: long('L',22) s = s[:-1] - parser = NumberStringParser(s, literal, base, fname) + parser = NumberStringParser(s, literal, base, 'long') return rbigint._from_numberstring_parser(parser) @staticmethod - @jit.elidable - def fromstr2(s, base=0): - """A sub-version of fromstr(), already elidable to be JIT-called - with only two arguments.""" - return rbigint.fromstr(s, base) - - @staticmethod def _from_numberstring_parser(parser): return parse_digit_string(parser) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -214,19 +214,13 @@ from rpython.rlib.rstring import ParseStringError assert rbigint.fromstr('123L').tolong() == 123 assert rbigint.fromstr('123L ').tolong() == 123 - py.test.raises(ParseStringError, rbigint.fromstr, '123L ', - ignore_l_suffix=True) py.test.raises(ParseStringError, rbigint.fromstr, 'L') py.test.raises(ParseStringError, rbigint.fromstr, 'L ') - e = py.test.raises(ParseStringError, rbigint.fromstr, 'L ', - fname='int') - assert 'int()' in e.value.msg assert rbigint.fromstr('123L', 4).tolong() == 27 assert rbigint.fromstr('123L', 30).tolong() == 27000 + 1800 + 90 + 21 assert rbigint.fromstr('123L', 22).tolong() == 10648 + 968 + 66 + 21 assert rbigint.fromstr('123L', 21).tolong() == 441 + 42 + 3 assert rbigint.fromstr('1891234174197319').tolong() == 1891234174197319 - assert rbigint.fromstr2('123L', 4).tolong() == 27 def test_from_numberstring_parser(self): from rpython.rlib.rstring import NumberStringParser From noreply at buildbot.pypy.org Wed Mar 5 20:47:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 20:47:25 +0100 (CET) Subject: [pypy-commit] pypy default: clean up the list/range tests Message-ID: <20140305194725.AB59F1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69732:b4e9b9428c20 Date: 2014-03-05 14:40 -0500 http://bitbucket.org/pypy/pypy/changeset/b4e9b9428c20/ Log: clean up the list/range tests diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -431,7 +431,7 @@ intlist.find(w(4), 0, 2) -class AppTestW_ListObject(object): +class AppTestListObject(object): def setup_class(cls): import platform import sys @@ -525,6 +525,18 @@ l.__init__(assignment) assert l == list(assignment) + def test_range_init(self): + x = range(5,1) + assert x == [] + + x = range(1,10) + x[22:0:-1] == range(1,10) + + r = range(10, 10) + assert len(r) == 0 + assert list(reversed(r)) == [] + assert r[:] == [] + def test_extend_list(self): l = l0 = [1] l.extend([2]) @@ -609,24 +621,28 @@ def test_sort_key(self): def lower(x): return x.lower() l = ['a', 'C', 'b'] - l.sort(key = lower) + l.sort(key=lower) assert l == ['a', 'b', 'C'] l = [] - l.sort(key = lower) + l.sort(key=lower) assert l == [] - l = [ 'a' ] - l.sort(key = lower) - assert l == [ 'a' ] + l = ['a'] + l.sort(key=lower) + assert l == ['a'] + + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_sort_reversed(self): l = range(10) - l.sort(reverse = True) + l.sort(reverse=True) assert l == range(9, -1, -1) l = [] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [] l = [1] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [1] def test_sort_cmp_key_reverse(self): @@ -640,6 +656,17 @@ l.sort() assert l == ["a", "b", "c", "d"] + def test_sort_range(self): + l = range(3, 10, 
3) + l.sort() + assert l == [3, 6, 9] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort() + assert l == [3, 6, 9] + def test_getitem(self): l = [1, 2, 3, 4, 5, 6, 9] assert l[0] == 1 @@ -663,6 +690,23 @@ l = [] raises(IndexError, "l[1]") + def test_getitem_range(self): + l = range(5) + raises(IndexError, "l[-6]") + raises(IndexError, "l[5]") + assert l[0] == 0 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-5] == 0 + + l = range(1, 5) + raises(IndexError, "l[-5]") + raises(IndexError, "l[4]") + assert l[0] == 1 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-4] == 1 + def test_setitem(self): l = [] raises(IndexError, "l[1] = 2") @@ -675,6 +719,10 @@ l[0] = "2" assert l == ["2",3] + l = range(3) + l[0] = 1 + assert l == [1,1,2] + def test_delitem(self): l = [1, 2, 3, 4, 5, 6, 9] del l[0] @@ -740,6 +788,29 @@ assert l[1:0:None] == [] assert l[1:0] == [] + def test_getslice_invalid(self): + x = [1,2,3,4] + assert x[10:0] == [] + assert x[10:0:None] == [] + + x = range(1,5) + assert x[10:0] == [] + assert x[10:0:None] == [] + + assert x[0:22] == [1,2,3,4] + assert x[-1:10] == [4] + + assert x[0:22:None] == [1,2,3,4] + assert x[-1:10:None] == [4] + + def test_getslice_range_backwards(self): + x = range(1,10) + assert x[22:-10] == [] + assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] + assert x[10:3:-1] == [9,8,7,6,5] + assert x[10:3:-2] == [9,7,5] + assert x[1:5:-1] == [] + def test_delall(self): l = l0 = [1,2,3] del l[:] @@ -777,6 +848,13 @@ l1 += [0] assert l1 == ['a', 'b', 'c', 0] + r1 = r2 = range(5) + assert r1 is r2 + r1 += [15] + assert r1 is r2 + assert r1 == [0, 1, 2, 3, 4, 15] + assert r2 == [0, 1, 2, 3, 4, 15] + def test_iadd_iterable(self): l = l0 = [1,2,3] l += iter([4,5]) @@ -835,6 +913,13 @@ l *= 2 assert l == [0, 1, 0, 1] + r1 = r2 = range(3) + assert r1 is r2 + r1 *= 2 + assert r1 is r2 + assert r1 == [0, 1, 2, 0, 1, 2] + assert r2 == [0, 1, 2, 0, 1, 2] + def test_mul_errors(self): try: [1, 2, 3] * (3,) @@ -916,6 +1001,11 @@ assert l == [] assert l is l0 + l = [] + l2 = range(3) + l.__setslice__(0,3,l2) + assert l == [0,1,2] + def test_assign_extended_slice(self): l = l0 = ['a', 'b', 'c'] l[::-1] = ['a', 'b', 'c'] @@ -1002,10 +1092,6 @@ l.append(x) assert l == range(5) - l = range(4) - l.append(4) - assert l == range(5) - l = [1,2,3] l.append("a") assert l == [1,2,3,"a"] @@ -1014,6 +1100,22 @@ l.append(4.4) assert l == [1.1, 2.2, 3.3, 4.4] + l = range(4) + l.append(4) + assert l == range(5) + + l = range(5) + l.append(26) + assert l == [0,1,2,3,4,26] + + l = range(5) + l.append("a") + assert l == [0,1,2,3,4,"a"] + + l = range(5) + l.append(5) + assert l == [0,1,2,3,4,5] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -1041,6 +1143,10 @@ l.insert(0,"a") assert l == ["a", 1, 2, 3] + l = range(3) + l.insert(1,5) + assert l == [0,5,1,2] + def test_pop(self): c = list('hello world') s = '' @@ -1053,6 +1159,7 @@ l = range(10) l.pop() assert l == range(9) + assert l.pop(0) == 0 l = [1.1, 2.2, 3.3] l.pop() @@ -1123,6 +1230,16 @@ c.reverse() assert ''.join(c) == 'dlrow olleh' + l = range(3) + l.reverse() + assert l == [2,1,0] + + r = range(3) + r[0] = 1 + assert r == [1, 1, 2] + r.reverse() + assert r == [2, 1, 1] + def test_reversed(self): assert list(list('hello').__reversed__()) == ['o', 'l', 'l', 'e', 'h'] assert list(reversed(list('hello'))) == ['o', 'l', 'l', 'e', 'h'] @@ -1387,106 +1504,27 @@ # assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + def test_no_len_on_range_iter(self): + iterable = 
range(10) + raises(TypeError, len, iter(iterable)) -class AppTestForRangeLists(AppTestW_ListObject): - spaceconfig = {"objspace.std.withrangelist": True} - - def test_range_simple_backwards(self): - x = range(5,1) - assert x == [] - - def test_range_big_start(self): - x = range(1,10) - x[22:0:-1] == range(1,10) - - def test_range_list_invalid_slice(self): - x = [1,2,3,4] - assert x[10:0] == [] - assert x[10:0:None] == [] - - x = range(1,5) - assert x[10:0] == [] - assert x[10:0:None] == [] - - assert x[0:22] == [1,2,3,4] - assert x[-1:10] == [4] - - assert x[0:22:None] == [1,2,3,4] - assert x[-1:10:None] == [4] - - def test_range_backwards(self): - x = range(1,10) - assert x[22:-10] == [] - assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] - assert x[10:3:-1] == [9,8,7,6,5] - assert x[10:3:-2] == [9,7,5] - assert x[1:5:-1] == [] - - def test_sort_range(self): - l = range(3,10,3) - l.sort() - assert l == [3, 6, 9] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort() - assert l == [3, 6, 9] - - def test_slice(self): - l = [] - l2 = range(3) - l.__setslice__(0,3,l2) - assert l == [0,1,2] - - def test_getitem_range(self): - l = range(5) - raises(IndexError, "l[-6]") - raises(IndexError, "l[5]") - assert l[0] == 0 - assert l[-1] == 4 - assert l[-2] == 3 - assert l[-5] == 0 - - l = range(1, 5) - raises(IndexError, "l[-5]") - raises(IndexError, "l[4]") - assert l[0] == 1 - assert l[-1] == 4 - assert l[-2] == 3 - assert l[-4] == 1 - - def test_append(self): - l = range(5) - l.append(26) - assert l == [0,1,2,3,4,26] - - l = range(5) - l.append("a") - assert l == [0,1,2,3,4,"a"] - - l = range(5) - l.append(5) - assert l == [0,1,2,3,4,5] - - def test_pop(self): - l = range(3) - assert l.pop(0) == 0 - - def test_setitem(self): - l = range(3) - l[0] = 1 - assert l == [1,1,2] - - def test_inset(self): - l = range(3) - l.insert(1,5) - assert l == [0,5,1,2] - - def test_reverse(self): - l = range(3) - l.reverse() - assert l == [2,1,0] + def test_reduce(self): + if self.on_cpython: + skip("cpython raises TypeError") # XXX investigate + it = iter(range(10)) + assert it.next() == 0 + assert it.next() == 1 + assert it.next() == 2 + assert it.next() == 3 + seqiter_new, args = it.__reduce__() + assert it.next() == 4 + assert it.next() == 5 + it2 = seqiter_new(*args) + assert it2.next() == 4 + assert it2.next() == 5 + it3 = seqiter_new(*args) + assert it3.next() == 4 + assert it3.next() == 5 def test_issue1266(self): l = range(1) @@ -1518,7 +1556,109 @@ assert item11 in l[::11] -class AppTestWithoutStrategies(object): +class AppTestListObjectWithRangeList(AppTestListObject): + spaceconfig = {"objspace.std.withrangelist": True} + + +class AppTestRangeListForcing: + """Tests for range lists that test forcing. Regular tests should go in + AppTestListObject so they can be run -A against CPython as well. 
+ """ + spaceconfig = {"objspace.std.withrangelist": True} + + def setup_class(cls): + if cls.runappdirect: + py.test.skip("__pypy__.internal_repr() cannot be used to see " + "if a range list was forced on top of pypy-c") + cls.w_not_forced = cls.space.appexec([], """(): + import __pypy__ + def f(r): + return (isinstance(r, list) and + "RangeListStrategy" in __pypy__.internal_repr(r)) + return f + """) + + def test_simple(self): + result = [] + r = range(1, 8, 2) + for i in r: + result.append(i) + assert result == [1, 3, 5, 7] + assert self.not_forced(r) + + def test_getitem_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[10:15]: + result.append(i) + assert result == [21, 23, 25, 27, 29] + assert not self.not_forced(r) + + def test_getitem_extended_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[40:30:-2]: + result.append(i) + assert result == [81, 77, 73, 69, 65] + assert not self.not_forced(r) + + def test_repr(self): + r = range(5) + assert repr(r) == "[0, 1, 2, 3, 4]" + assert self.not_forced(r) + + def test_force(self): + r = range(10) + r[0] = 42 + assert not self.not_forced(r) + assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + def test_reverse(self): + r = range(10) + r.reverse() + assert not self.not_forced(r) + assert r == range(9, -1, -1) + + def test_pop(self): + # RangeListStrategy + r = range(1, 10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + assert repr(r) == repr(range(1, 9)) + res = r.pop(0) + assert res == 1 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 9)) + res = r.pop(len(r) - 1) + assert res == 8 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 8)) + res = r.pop(2) + assert res == 4 + assert not self.not_forced(r) + assert r == [2, 3, 5, 6, 7] + res = r.pop(2) + assert res == 5 + assert not self.not_forced(r) + assert r == [2, 3, 6, 7] + + # SimpleRangeListStrategy + r = range(10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + res = r.pop() + assert res == 8 + assert repr(r) == repr(range(8)) + assert self.not_forced(r) + res = r.pop(0) + assert res == 0 + assert not self.not_forced(r) + assert r == [1, 2, 3, 4, 5, 6, 7] + + +class AppTestWithoutStrategies: spaceconfig = {"objspace.std.withliststrategies": False} def test_no_shared_empty_list(self): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -6,8 +6,8 @@ from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject + class TestW_ListStrategies(TestW_ListObject): - def test_check_strategy(self): space = self.space w = space.wrap @@ -236,7 +236,6 @@ l.setslice(0, 1, 2, make_range_list(space, 5, 1, 4)) assert isinstance(l.strategy, IntegerListStrategy) - def test_setslice_List(self): space = self.space @@ -705,7 +704,6 @@ w_l2.sort(False) assert space.eq_w(w_l, w_l2) - def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py deleted file mode 100644 --- a/pypy/objspace/std/test/test_rangeobject.py +++ /dev/null @@ -1,156 +0,0 @@ -import py - - -class AppTestRangeListObject(object): - spaceconfig = {"objspace.std.withrangelist": True} - - def setup_class(cls): - if cls.runappdirect: - py.test.skip("__pypy__.internal_repr() cannot be used to see " - "if a 
range list was forced on top of pypy-c") - cls.w_not_forced = cls.space.appexec([], """(): - import __pypy__ - def f(r): - return (isinstance(r, list) and - "RangeListStrategy" in __pypy__.internal_repr(r)) - return f - """) - cls.w_SORT_FORCES_LISTS = cls.space.wrap(False) - - def test_simple(self): - result = [] - r = range(1, 8, 2) - for i in r: - result.append(i) - assert result == [1, 3, 5, 7] - assert self.not_forced(r) - - def test_getitem_simple(self): - r = range(4) - assert r[-1] == 3 - assert r[3] == 3 - assert r[-4] == 0 - raises(IndexError, r.__getitem__, -5) - raises(IndexError, r.__getitem__, 4) - - def test_getitem_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[10:15]: - result.append(i) - assert result == [21, 23, 25, 27, 29] - assert not self.not_forced(r) - - def test_getitem_extended_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[40:30:-2]: - result.append(i) - assert result == [81, 77, 73, 69, 65] - assert not self.not_forced(r) - - def test_empty_range(self): - r = range(10, 10) - assert len(r) == 0 - assert list(reversed(r)) == [] - assert r[:] == [] - - def test_repr(self): - r = range(5) - assert repr(r) == "[0, 1, 2, 3, 4]" - assert self.not_forced(r) - - def test_force(self): - r = range(10) - r[0] = 42 - assert not self.not_forced(r) - assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] - - def test_reverse(self): - r = range(10) - r.reverse() - assert not self.not_forced(r) - assert r == range(9, -1, -1) - r = range(3) - r[0] = 1 - assert r == [1, 1, 2] - r.reverse() - assert r == [2, 1, 1] - - r = range(10) - r.sort(key=lambda x: -x) - assert r == range(9, -1, -1) - - def test_pop(self): - # RangeListStrategy - r = range(1, 10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 9)) - res = r.pop(0) - assert res == 1 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 9)) - res = r.pop(len(r) - 1) - assert res == 8 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 8)) - res = r.pop(2) - assert res == 4 - assert not self.not_forced(r) - assert r == [2, 3, 5, 6, 7] - res = r.pop(2) - assert res == 5 - assert not self.not_forced(r) - assert r == [2, 3, 6, 7] - - # SimpleRangeListStrategy - r = range(10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - res = r.pop() - assert res == 8 - assert repr(r) == repr(range(8)) - assert self.not_forced(r) - res = r.pop(0) - assert res == 0 - assert not self.not_forced(r) - assert r == [1, 2, 3, 4, 5, 6, 7] - - def test_reduce(self): - it = iter(range(10)) - assert it.next() == 0 - assert it.next() == 1 - assert it.next() == 2 - assert it.next() == 3 - seqiter_new, args = it.__reduce__() - assert it.next() == 4 - assert it.next() == 5 - it2 = seqiter_new(*args) - assert it2.next() == 4 - assert it2.next() == 5 - it3 = seqiter_new(*args) - assert it3.next() == 4 - assert it3.next() == 5 - - def test_no_len_on_range_iter(self): - iterable = range(10) - raises(TypeError, len, iter(iterable)) - - def test_inplace_add(self): - r1 = r2 = range(5) - assert r1 is r2 - r1 += [15] - assert r1 is r2 - assert r1 == [0, 1, 2, 3, 4, 15] - assert r2 == [0, 1, 2, 3, 4, 15] - - def test_inplace_mul(self): - r1 = r2 = range(3) - assert r1 is r2 - r1 *= 2 - assert r1 is r2 - assert r1 == [0, 1, 2, 0, 1, 2] - assert r2 == [0, 1, 2, 0, 1, 2] From noreply at buildbot.pypy.org Wed Mar 5 21:48:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 21:48:02 +0100 (CET) Subject: [pypy-commit] pypy default: be more explicit 
Message-ID: <20140305204802.8DB1B1C02C1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69733:37e17ee041e2 Date: 2014-03-05 15:47 -0500 http://bitbucket.org/pypy/pypy/changeset/37e17ee041e2/ Log: be more explicit diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1557,12 +1557,17 @@ class AppTestListObjectWithRangeList(AppTestListObject): + """Run the list object tests with range lists enabled. Tests should go in + AppTestListObject so they can be run -A against CPython as well. + """ spaceconfig = {"objspace.std.withrangelist": True} class AppTestRangeListForcing: """Tests for range lists that test forcing. Regular tests should go in - AppTestListObject so they can be run -A against CPython as well. + AppTestListObject so they can be run -A against CPython as well. Separate + from AppTestListObjectWithRangeList so we don't silently overwrite tests + with the same names. """ spaceconfig = {"objspace.std.withrangelist": True} From noreply at buildbot.pypy.org Wed Mar 5 22:22:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 22:22:29 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: merge this test with the existing one Message-ID: <20140305212229.74CE31C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-nditer Changeset: r69734:f7b0b2365c0f Date: 2014-03-05 16:01 -0500 http://bitbucket.org/pypy/pypy/changeset/f7b0b2365c0f/ Log: merge this test with the existing one diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2116,23 +2116,6 @@ assert (b == [20, 1, 21, 3, 4]).all() raises(ValueError, "array([1, 2])[array([True, False, True])] = [1, 2, 3]") - def test_ellipse_index(self): - from numpypy import arange, array - b = arange(24).reshape(2,3,4) - b[...] = 100 - assert (b == 100).all() - assert b.shape == (2, 3, 4) - b[...] = [10, 20, 30, 40] - assert (b[:,:,0] == 10).all() - assert (b[0,0,:] == [10, 20, 30, 40]).all() - assert b.shape == b[...].shape - assert (b == b[...]).all() - - a = array(1) - a[...] = 100 - assert (a == 100).all() - assert a == a[...] - def test_weakref(self): import _weakref from numpypy import array @@ -2345,6 +2328,16 @@ a[...] = 4 assert (a == [4, 4, 4]).all() + b = np.arange(24).reshape(2,3,4) + b[...] = 100 + assert (b == 100).all() + assert b.shape == (2, 3, 4) + b[...] 
= [10, 20, 30, 40] + assert (b[:,:,0] == 10).all() + assert (b[0,0,:] == [10, 20, 30, 40]).all() + assert b.shape == b[...].shape + assert (b == b[...]).all() + class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) From noreply at buildbot.pypy.org Wed Mar 5 22:22:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 22:22:30 +0100 (CET) Subject: [pypy-commit] pypy default: simplify Message-ID: <20140305212230.B462E1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69735:240dfa51dd8c Date: 2014-03-05 16:09 -0500 http://bitbucket.org/pypy/pypy/changeset/240dfa51dd8c/ Log: simplify diff --git a/pypy/interpreter/special.py b/pypy/interpreter/special.py --- a/pypy/interpreter/special.py +++ b/pypy/interpreter/special.py @@ -2,16 +2,10 @@ class Ellipsis(W_Root): - def __init__(self, space): - self.space = space - - def descr__repr__(self): - return self.space.wrap('Ellipsis') + def descr__repr__(self, space): + return space.wrap('Ellipsis') class NotImplemented(W_Root): - def __init__(self, space): - self.space = space - - def descr__repr__(self): - return self.space.wrap('NotImplemented') + def descr__repr__(self, space): + return space.wrap('NotImplemented') diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -71,8 +71,8 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_Ellipsis = special.Ellipsis(self) - self.w_NotImplemented = special.NotImplemented(self) + self.w_Ellipsis = special.Ellipsis() + self.w_NotImplemented = special.NotImplemented() def _freeze_(self): return True diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -58,8 +58,8 @@ self.w_None = W_NoneObject.w_None self.w_False = W_BoolObject.w_False self.w_True = W_BoolObject.w_True - self.w_NotImplemented = self.wrap(special.NotImplemented(self)) - self.w_Ellipsis = self.wrap(special.Ellipsis(self)) + self.w_NotImplemented = self.wrap(special.NotImplemented()) + self.w_Ellipsis = self.wrap(special.Ellipsis()) # types self.builtin_types = {} From noreply at buildbot.pypy.org Wed Mar 5 22:22:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 22:22:32 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: merge default Message-ID: <20140305212232.037E51C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-nditer Changeset: r69736:6d344175d0f5 Date: 2014-03-05 16:10 -0500 http://bitbucket.org/pypy/pypy/changeset/6d344175d0f5/ Log: merge default diff --git a/pypy/interpreter/special.py b/pypy/interpreter/special.py --- a/pypy/interpreter/special.py +++ b/pypy/interpreter/special.py @@ -2,16 +2,10 @@ class Ellipsis(W_Root): - def __init__(self, space): - self.space = space - def descr__repr__(self, space): return space.wrap('Ellipsis') class NotImplemented(W_Root): - def __init__(self, space): - self.space = space - def descr__repr__(self, space): return space.wrap('NotImplemented') diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -71,8 +71,8 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_Ellipsis = special.Ellipsis(self) - self.w_NotImplemented = 
special.NotImplemented(self) + self.w_Ellipsis = special.Ellipsis() + self.w_NotImplemented = special.NotImplemented() def _freeze_(self): return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -176,14 +176,14 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" guard_not_invalidated? - i16 = int_lt(i11, i12) - guard_true(i16, descr=...) + i16 = int_ge(i11, i12) + guard_false(i16, descr=...) i20 = int_add(i11, 1) i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i25 = int_lt(i11, i9) - guard_true(i25, descr=...) + i25 = int_ge(i11, i9) + guard_false(i25, descr=...) i27 = int_add_ovf(i7, i11) guard_no_overflow(descr=...) --TICK-- @@ -214,10 +214,10 @@ i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? - i23 = int_ge(i18, 0) - guard_true(i23, descr=...) - i25 = int_lt(i18, i9) - guard_true(i25, descr=...) + i23 = int_lt(i18, 0) + guard_false(i23, descr=...) + i25 = int_ge(i18, i9) + guard_false(i25, descr=...) i27 = int_add_ovf(i7, i18) guard_no_overflow(descr=...) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr2), p25, 16, descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) 
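
The "simplify" changeset above (r69735, repeated in this merge) reduces to one pattern: the Ellipsis and NotImplemented singletons stop capturing the object space at construction time and instead receive it per call in descr__repr__. A standalone, hypothetical sketch of that pattern follows (illustration only, with a stub space; these are not PyPy's actual classes):

class FakeSpace(object):
    # stand-in for the object space; wrap() just returns its argument here
    def wrap(self, x):
        return x

class EllipsisBefore(object):
    # old style: every instance drags a space reference around
    def __init__(self, space):
        self.space = space
    def descr__repr__(self):
        return self.space.wrap('Ellipsis')

class EllipsisAfter(object):
    # new style: no per-instance state; the space arrives as an argument
    def descr__repr__(self, space):
        return space.wrap('Ellipsis')

space = FakeSpace()
assert EllipsisBefore(space).descr__repr__() == EllipsisAfter().descr__repr__(space)

The stateless form is what lets compile.py and objspace.py construct the singletons with no arguments, as the hunks above show.
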
diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -542,7 +542,7 @@ def _string_to_w_long(space, w_longtype, w_source, string, base=10): try: - bigint = rbigint.fromstr2(string, base) + bigint = rbigint.fromstr(string, base) except ParseStringError as e: from pypy.objspace.std.intobject import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -58,8 +58,8 @@ self.w_None = W_NoneObject.w_None self.w_False = W_BoolObject.w_False self.w_True = W_BoolObject.w_True - self.w_NotImplemented = self.wrap(special.NotImplemented(self)) - self.w_Ellipsis = self.wrap(special.Ellipsis(self)) + self.w_NotImplemented = self.wrap(special.NotImplemented()) + self.w_Ellipsis = self.wrap(special.Ellipsis()) # types self.builtin_types = {} diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -431,7 +431,7 @@ intlist.find(w(4), 0, 2) -class AppTestW_ListObject(object): +class AppTestListObject(object): def setup_class(cls): import platform import sys @@ -525,6 +525,18 @@ l.__init__(assignment) assert l == list(assignment) + def test_range_init(self): + x = range(5,1) + assert x == [] + + x = range(1,10) + x[22:0:-1] == range(1,10) + + r = range(10, 10) + assert len(r) == 0 + assert list(reversed(r)) == [] + assert r[:] == [] + def test_extend_list(self): l = l0 = [1] l.extend([2]) @@ -609,24 +621,28 @@ def test_sort_key(self): def lower(x): return x.lower() l = ['a', 'C', 'b'] - l.sort(key = lower) + l.sort(key=lower) assert l == ['a', 'b', 'C'] l = [] - l.sort(key = lower) + l.sort(key=lower) assert l == [] - l = [ 'a' ] - l.sort(key = lower) - assert l == [ 'a' ] + l = ['a'] + l.sort(key=lower) + assert l == ['a'] + + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_sort_reversed(self): l = range(10) - l.sort(reverse = True) + l.sort(reverse=True) assert l == range(9, -1, -1) l = [] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [] l = [1] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [1] def test_sort_cmp_key_reverse(self): @@ -640,6 +656,17 @@ l.sort() assert l == ["a", "b", "c", "d"] + def test_sort_range(self): + l = range(3, 10, 3) + l.sort() + assert l == [3, 6, 9] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort() + assert l == [3, 6, 9] + def test_getitem(self): l = [1, 2, 3, 4, 5, 6, 9] assert l[0] == 1 @@ -663,6 +690,23 @@ l = [] raises(IndexError, "l[1]") + def test_getitem_range(self): + l = range(5) + raises(IndexError, "l[-6]") + raises(IndexError, "l[5]") + assert l[0] == 0 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-5] == 0 + + l = range(1, 5) + raises(IndexError, "l[-5]") + raises(IndexError, "l[4]") + assert l[0] == 1 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-4] == 1 + def test_setitem(self): l = [] raises(IndexError, "l[1] = 2") @@ -675,6 +719,10 @@ l[0] = "2" assert l == ["2",3] + l = range(3) + l[0] = 1 + assert l == [1,1,2] + def test_delitem(self): l = [1, 2, 3, 4, 5, 6, 9] del l[0] @@ -740,6 +788,29 @@ assert l[1:0:None] == [] assert l[1:0] == [] + def test_getslice_invalid(self): + x = [1,2,3,4] + assert x[10:0] == [] + assert x[10:0:None] == 
[] + + x = range(1,5) + assert x[10:0] == [] + assert x[10:0:None] == [] + + assert x[0:22] == [1,2,3,4] + assert x[-1:10] == [4] + + assert x[0:22:None] == [1,2,3,4] + assert x[-1:10:None] == [4] + + def test_getslice_range_backwards(self): + x = range(1,10) + assert x[22:-10] == [] + assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] + assert x[10:3:-1] == [9,8,7,6,5] + assert x[10:3:-2] == [9,7,5] + assert x[1:5:-1] == [] + def test_delall(self): l = l0 = [1,2,3] del l[:] @@ -777,6 +848,13 @@ l1 += [0] assert l1 == ['a', 'b', 'c', 0] + r1 = r2 = range(5) + assert r1 is r2 + r1 += [15] + assert r1 is r2 + assert r1 == [0, 1, 2, 3, 4, 15] + assert r2 == [0, 1, 2, 3, 4, 15] + def test_iadd_iterable(self): l = l0 = [1,2,3] l += iter([4,5]) @@ -784,8 +862,6 @@ assert l == [1,2,3,4,5] def test_iadd_subclass(self): - #XXX - skip("Maybe there is something wrong in descroperation?") class Bar(object): def __radd__(self, other): return ('radd', self, other) @@ -837,6 +913,13 @@ l *= 2 assert l == [0, 1, 0, 1] + r1 = r2 = range(3) + assert r1 is r2 + r1 *= 2 + assert r1 is r2 + assert r1 == [0, 1, 2, 0, 1, 2] + assert r2 == [0, 1, 2, 0, 1, 2] + def test_mul_errors(self): try: [1, 2, 3] * (3,) @@ -918,6 +1001,11 @@ assert l == [] assert l is l0 + l = [] + l2 = range(3) + l.__setslice__(0,3,l2) + assert l == [0,1,2] + def test_assign_extended_slice(self): l = l0 = ['a', 'b', 'c'] l[::-1] = ['a', 'b', 'c'] @@ -1004,10 +1092,6 @@ l.append(x) assert l == range(5) - l = range(4) - l.append(4) - assert l == range(5) - l = [1,2,3] l.append("a") assert l == [1,2,3,"a"] @@ -1016,6 +1100,22 @@ l.append(4.4) assert l == [1.1, 2.2, 3.3, 4.4] + l = range(4) + l.append(4) + assert l == range(5) + + l = range(5) + l.append(26) + assert l == [0,1,2,3,4,26] + + l = range(5) + l.append("a") + assert l == [0,1,2,3,4,"a"] + + l = range(5) + l.append(5) + assert l == [0,1,2,3,4,5] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -1043,6 +1143,10 @@ l.insert(0,"a") assert l == ["a", 1, 2, 3] + l = range(3) + l.insert(1,5) + assert l == [0,5,1,2] + def test_pop(self): c = list('hello world') s = '' @@ -1055,6 +1159,7 @@ l = range(10) l.pop() assert l == range(9) + assert l.pop(0) == 0 l = [1.1, 2.2, 3.3] l.pop() @@ -1125,6 +1230,16 @@ c.reverse() assert ''.join(c) == 'dlrow olleh' + l = range(3) + l.reverse() + assert l == [2,1,0] + + r = range(3) + r[0] = 1 + assert r == [1, 1, 2] + r.reverse() + assert r == [2, 1, 1] + def test_reversed(self): assert list(list('hello').__reversed__()) == ['o', 'l', 'l', 'e', 'h'] assert list(reversed(list('hello'))) == ['o', 'l', 'l', 'e', 'h'] @@ -1389,106 +1504,27 @@ # assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + def test_no_len_on_range_iter(self): + iterable = range(10) + raises(TypeError, len, iter(iterable)) -class AppTestForRangeLists(AppTestW_ListObject): - spaceconfig = {"objspace.std.withrangelist": True} - - def test_range_simple_backwards(self): - x = range(5,1) - assert x == [] - - def test_range_big_start(self): - x = range(1,10) - x[22:0:-1] == range(1,10) - - def test_range_list_invalid_slice(self): - x = [1,2,3,4] - assert x[10:0] == [] - assert x[10:0:None] == [] - - x = range(1,5) - assert x[10:0] == [] - assert x[10:0:None] == [] - - assert x[0:22] == [1,2,3,4] - assert x[-1:10] == [4] - - assert x[0:22:None] == [1,2,3,4] - assert x[-1:10:None] == [4] - - def test_range_backwards(self): - x = range(1,10) - assert x[22:-10] == [] - assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] - assert x[10:3:-1] == [9,8,7,6,5] - assert x[10:3:-2] == [9,7,5] - 
assert x[1:5:-1] == [] - - def test_sort_range(self): - l = range(3,10,3) - l.sort() - assert l == [3, 6, 9] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort() - assert l == [3, 6, 9] - - def test_slice(self): - l = [] - l2 = range(3) - l.__setslice__(0,3,l2) - assert l == [0,1,2] - - def test_getitem(self): - l = range(5) - raises(IndexError, "l[-6]") - raises(IndexError, "l[5]") - assert l[0] == 0 - assert l[-1] == 4 - assert l[-2] == 3 - assert l[-5] == 0 - - l = range(1, 5) - raises(IndexError, "l[-5]") - raises(IndexError, "l[4]") - assert l[0] == 1 - assert l[-1] == 4 - assert l[-2] == 3 - assert l[-4] == 1 - - def test_append(self): - l = range(5) - l.append(26) - assert l == [0,1,2,3,4,26] - - l = range(5) - l.append("a") - assert l == [0,1,2,3,4,"a"] - - l = range(5) - l.append(5) - assert l == [0,1,2,3,4,5] - - def test_pop(self): - l = range(3) - assert l.pop(0) == 0 - - def test_setitem(self): - l = range(3) - l[0] = 1 - assert l == [1,1,2] - - def test_inset(self): - l = range(3) - l.insert(1,5) - assert l == [0,5,1,2] - - def test_reverse(self): - l = range(3) - l.reverse() - assert l == [2,1,0] + def test_reduce(self): + if self.on_cpython: + skip("cpython raises TypeError") # XXX investigate + it = iter(range(10)) + assert it.next() == 0 + assert it.next() == 1 + assert it.next() == 2 + assert it.next() == 3 + seqiter_new, args = it.__reduce__() + assert it.next() == 4 + assert it.next() == 5 + it2 = seqiter_new(*args) + assert it2.next() == 4 + assert it2.next() == 5 + it3 = seqiter_new(*args) + assert it3.next() == 4 + assert it3.next() == 5 def test_issue1266(self): l = range(1) @@ -1520,7 +1556,114 @@ assert item11 in l[::11] -class AppTestWithoutStrategies(object): +class AppTestListObjectWithRangeList(AppTestListObject): + """Run the list object tests with range lists enabled. Tests should go in + AppTestListObject so they can be run -A against CPython as well. + """ + spaceconfig = {"objspace.std.withrangelist": True} + + +class AppTestRangeListForcing: + """Tests for range lists that test forcing. Regular tests should go in + AppTestListObject so they can be run -A against CPython as well. Separate + from AppTestListObjectWithRangeList so we don't silently overwrite tests + with the same names. 
+ """ + spaceconfig = {"objspace.std.withrangelist": True} + + def setup_class(cls): + if cls.runappdirect: + py.test.skip("__pypy__.internal_repr() cannot be used to see " + "if a range list was forced on top of pypy-c") + cls.w_not_forced = cls.space.appexec([], """(): + import __pypy__ + def f(r): + return (isinstance(r, list) and + "RangeListStrategy" in __pypy__.internal_repr(r)) + return f + """) + + def test_simple(self): + result = [] + r = range(1, 8, 2) + for i in r: + result.append(i) + assert result == [1, 3, 5, 7] + assert self.not_forced(r) + + def test_getitem_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[10:15]: + result.append(i) + assert result == [21, 23, 25, 27, 29] + assert not self.not_forced(r) + + def test_getitem_extended_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[40:30:-2]: + result.append(i) + assert result == [81, 77, 73, 69, 65] + assert not self.not_forced(r) + + def test_repr(self): + r = range(5) + assert repr(r) == "[0, 1, 2, 3, 4]" + assert self.not_forced(r) + + def test_force(self): + r = range(10) + r[0] = 42 + assert not self.not_forced(r) + assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + def test_reverse(self): + r = range(10) + r.reverse() + assert not self.not_forced(r) + assert r == range(9, -1, -1) + + def test_pop(self): + # RangeListStrategy + r = range(1, 10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + assert repr(r) == repr(range(1, 9)) + res = r.pop(0) + assert res == 1 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 9)) + res = r.pop(len(r) - 1) + assert res == 8 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 8)) + res = r.pop(2) + assert res == 4 + assert not self.not_forced(r) + assert r == [2, 3, 5, 6, 7] + res = r.pop(2) + assert res == 5 + assert not self.not_forced(r) + assert r == [2, 3, 6, 7] + + # SimpleRangeListStrategy + r = range(10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + res = r.pop() + assert res == 8 + assert repr(r) == repr(range(8)) + assert self.not_forced(r) + res = r.pop(0) + assert res == 0 + assert not self.not_forced(r) + assert r == [1, 2, 3, 4, 5, 6, 7] + + +class AppTestWithoutStrategies: spaceconfig = {"objspace.std.withliststrategies": False} def test_no_shared_empty_list(self): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -6,8 +6,8 @@ from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject + class TestW_ListStrategies(TestW_ListObject): - def test_check_strategy(self): space = self.space w = space.wrap @@ -236,7 +236,6 @@ l.setslice(0, 1, 2, make_range_list(space, 5, 1, 4)) assert isinstance(l.strategy, IntegerListStrategy) - def test_setslice_List(self): space = self.space @@ -705,7 +704,6 @@ w_l2.sort(False) assert space.eq_w(w_l, w_l2) - def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py deleted file mode 100644 --- a/pypy/objspace/std/test/test_rangeobject.py +++ /dev/null @@ -1,154 +0,0 @@ -import py - -class AppTestRangeListObject(object): - spaceconfig = {"objspace.std.withrangelist": True} - - def setup_class(cls): - if cls.runappdirect: - py.test.skip("__pypy__.internal_repr() cannot be used to see " - "if a range 
list was forced on top of pypy-c") - cls.w_not_forced = cls.space.appexec([], """(): - import __pypy__ - def f(r): - return (isinstance(r, list) and - "RangeListStrategy" in __pypy__.internal_repr(r)) - return f - """) - cls.w_SORT_FORCES_LISTS = cls.space.wrap(False) - - def test_simple(self): - result = [] - r = range(1, 8, 2) - for i in r: - result.append(i) - assert result == [1, 3, 5, 7] - assert self.not_forced(r) - - def test_getitem_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[10:15]: - result.append(i) - assert result == [21, 23, 25, 27, 29] - assert not self.not_forced(r) - - def test_getitem_extended_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[40:30:-2]: - result.append(i) - assert result == [81, 77, 73, 69, 65] - assert not self.not_forced(r) - - def test_empty_range(self): - r = range(10, 10) - assert len(r) == 0 - assert list(reversed(r)) == [] - assert r[:] == [] - - def test_repr(self): - r = range(5) - assert repr(r) == "[0, 1, 2, 3, 4]" - assert self.not_forced(r) - - def test_force(self): - r = range(10) - r[0] = 42 - assert not self.not_forced(r) - assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] - - def test_reverse(self): - r = range(10) - r.reverse() - assert not self.not_forced(r) - assert r == range(9, -1, -1) - r = range(3) - r[0] = 1 - assert r == [1, 1, 2] - r.reverse() - assert r == [2, 1, 1] - - r = range(10) - r.sort(key=lambda x: -x) - assert r == range(9, -1, -1) - def test_pop(self): - # RangeListStrategy - r = range(1, 10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 9)) - res = r.pop(0) - assert res == 1 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 9)) - res = r.pop(len(r) - 1) - assert res == 8 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 8)) - res = r.pop(2) - assert res == 4 - assert not self.not_forced(r) - assert r == [2, 3, 5, 6, 7] - res = r.pop(2) - assert res == 5 - assert not self.not_forced(r) - assert r == [2, 3, 6, 7] - - # SimpleRangeListStrategy - r = range(10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - res = r.pop() - assert res == 8 - assert repr(r) == repr(range(8)) - assert self.not_forced(r) - res = r.pop(0) - assert res == 0 - assert not self.not_forced(r) - assert r == [1, 2, 3, 4, 5, 6, 7] - - def test_getitem_simple(self): - r = range(4) - assert r[-1] == 3 - assert r[3] == 3 - assert r[-4] == 0 - raises(IndexError, r.__getitem__, -5) - raises(IndexError, r.__getitem__, 4) - - def test_reduce(self): - it = iter(range(10)) - assert it.next() == 0 - assert it.next() == 1 - assert it.next() == 2 - assert it.next() == 3 - seqiter_new, args = it.__reduce__() - assert it.next() == 4 - assert it.next() == 5 - it2 = seqiter_new(*args) - assert it2.next() == 4 - assert it2.next() == 5 - it3 = seqiter_new(*args) - assert it3.next() == 4 - assert it3.next() == 5 - - def test_no_len_on_range_iter(self): - iterable = range(10) - raises(TypeError, len, iter(iterable)) - - def test_inplace_add(self): - r1 = r2 = range(5) - assert r1 is r2 - r1 += [15] - assert r1 is r2 - assert r1 == [0, 1, 2, 3, 4, 15] - assert r2 == [0, 1, 2, 3, 4, 15] - - def test_inplace_mul(self): - r1 = r2 = range(3) - assert r1 is r2 - r1 *= 2 - assert r1 is r2 - assert r1 == [0, 1, 2, 0, 1, 2] - assert r2 == [0, 1, 2, 0, 1, 2] diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -254,28 +254,19 @@ @staticmethod @jit.elidable - def fromstr(s, base=0, 
ignore_l_suffix=False, fname='long'): - """As string_to_int(), but optionally ignores an optional 'l' or - 'L' suffix and returns an rbigint. - """ + def fromstr(s, base=0): + """As string_to_int(), but ignores an optional 'l' or 'L' suffix + and returns an rbigint.""" from rpython.rlib.rstring import NumberStringParser, \ strip_spaces s = literal = strip_spaces(s) - if (not ignore_l_suffix and (s.endswith('l') or s.endswith('L')) and - base < 22): + if (s.endswith('l') or s.endswith('L')) and base < 22: # in base 22 and above, 'L' is a valid digit! try: long('L',22) s = s[:-1] - parser = NumberStringParser(s, literal, base, fname) + parser = NumberStringParser(s, literal, base, 'long') return rbigint._from_numberstring_parser(parser) @staticmethod - @jit.elidable - def fromstr2(s, base=0): - """A sub-version of fromstr(), already elidable to be JIT-called - with only two arguments.""" - return rbigint.fromstr(s, base) - - @staticmethod def _from_numberstring_parser(parser): return parse_digit_string(parser) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -214,19 +214,13 @@ from rpython.rlib.rstring import ParseStringError assert rbigint.fromstr('123L').tolong() == 123 assert rbigint.fromstr('123L ').tolong() == 123 - py.test.raises(ParseStringError, rbigint.fromstr, '123L ', - ignore_l_suffix=True) py.test.raises(ParseStringError, rbigint.fromstr, 'L') py.test.raises(ParseStringError, rbigint.fromstr, 'L ') - e = py.test.raises(ParseStringError, rbigint.fromstr, 'L ', - fname='int') - assert 'int()' in e.value.msg assert rbigint.fromstr('123L', 4).tolong() == 27 assert rbigint.fromstr('123L', 30).tolong() == 27000 + 1800 + 90 + 21 assert rbigint.fromstr('123L', 22).tolong() == 10648 + 968 + 66 + 21 assert rbigint.fromstr('123L', 21).tolong() == 441 + 42 + 3 assert rbigint.fromstr('1891234174197319').tolong() == 1891234174197319 - assert rbigint.fromstr2('123L', 4).tolong() == 27 def test_from_numberstring_parser(self): from rpython.rlib.rstring import NumberStringParser From noreply at buildbot.pypy.org Wed Mar 5 22:28:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 22:28:25 +0100 (CET) Subject: [pypy-commit] pypy default: add this extra test Message-ID: <20140305212825.8594D1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69737:b03d8d46d83d Date: 2014-03-05 16:25 -0500 http://bitbucket.org/pypy/pypy/changeset/b03d8d46d83d/ Log: add this extra test diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2328,6 +2328,16 @@ a[...] = 4 assert (a == [4, 4, 4]).all() + b = np.arange(24).reshape(2,3,4) + b[...] = 100 + assert (b == 100).all() + assert b.shape == (2, 3, 4) + b[...] 
= [10, 20, 30, 40] + assert (b[:,:,0] == 10).all() + assert (b[0,0,:] == [10, 20, 30, 40]).all() + assert b.shape == b[...].shape + assert (b == b[...]).all() + class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) From noreply at buildbot.pypy.org Wed Mar 5 22:28:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 22:28:26 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: merge default Message-ID: <20140305212826.B170E1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-nditer Changeset: r69738:cface9ad9ad7 Date: 2014-03-05 16:27 -0500 http://bitbucket.org/pypy/pypy/changeset/cface9ad9ad7/ Log: merge default diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -2,7 +2,6 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized -from pypy.interpreter.special import Ellipsis from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE from rpython.rtyper.lltypesystem import rffi, lltype @@ -171,7 +170,6 @@ """ if (space.isinstance_w(w_idx, space.w_str) or space.isinstance_w(w_idx, space.w_slice) or - isinstance(w_idx, Ellipsis) or space.is_w(w_idx, space.w_None)): raise IndexError if isinstance(w_idx, W_NDimArray) and not w_idx.is_scalar(): @@ -227,7 +225,7 @@ raise OperationError(space.w_IndexError, space.wrap( "arrays used as indices must be of integer (or boolean) type")) return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) - elif space.is_w(w_idx, space.w_None) or isinstance(w_idx, Ellipsis): + elif space.is_w(w_idx, space.w_None): return Chunks([NewAxisChunk()]) result = [] i = 0 From noreply at buildbot.pypy.org Wed Mar 5 23:34:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 23:34:51 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: don't need a test_ztranslation in submodule Message-ID: <20140305223451.E18841C3152@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69739:5eaa6b2dc7ce Date: 2014-03-05 01:30 -0500 http://bitbucket.org/pypy/pypy/changeset/5eaa6b2dc7ce/ Log: don't need a test_ztranslation in submodule diff --git a/pypy/module/_rawffi/alt/test/test_ztranslation.py b/pypy/module/_rawffi/alt/test/test_ztranslation.py deleted file mode 100644 --- a/pypy/module/_rawffi/alt/test/test_ztranslation.py +++ /dev/null @@ -1,4 +0,0 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test__ffi_translates(): - checkmodule('_rawffi.alt') From noreply at buildbot.pypy.org Wed Mar 5 23:34:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 5 Mar 2014 23:34:53 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: cleanup Message-ID: <20140305223453.383831C3152@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69740:0a4d945c60dc Date: 2014-03-05 16:58 -0500 http://bitbucket.org/pypy/pypy/changeset/0a4d945c60dc/ Log: cleanup diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -217,9 +217,7 @@ expected_filename = str(udir.join('sample')) expected_mode = 'rb' extra_args = () - spaceconfig = { - "usemodules": ["binascii", "rctime"], - } + 
spaceconfig = {"usemodules": ["binascii", "rctime"]} def setup_method(self, method): space = self.space @@ -279,9 +277,7 @@ expected_filename = '' expected_mode = 'rb' extra_args = () - spaceconfig = { - "usemodules": ["binascii", "rctime"], - } + spaceconfig = {"usemodules": ["binascii", "rctime"]} def setup_method(self, method): space = self.space @@ -359,9 +355,7 @@ # A few extra tests class AppTestAFewExtra: - spaceconfig = { - "usemodules": ['array', '_socket', 'binascii', 'rctime'], - } + spaceconfig = {"usemodules": ['array', '_socket', 'binascii', 'rctime']} def setup_method(self, method): fn = str(udir.join('temptestfile')) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -26,16 +26,13 @@ You typically take a basis stream, place zero or more filtering streams on top of it, and then top it off with an input-buffering and/or an outout-buffering stream. - """ -# # File offsets are all 'r_longlong', but a single read or write cannot # transfer more data that fits in an RPython 'int' (because that would not # fit in a single string anyway). This module needs to be careful about # where r_longlong values end up: as argument to seek() and truncate() and # return value of tell(), but not as argument to read(). -# import os, sys, errno from rpython.rlib.objectmodel import specialize, we_are_translated @@ -56,14 +53,12 @@ } class MyNotImplementedError(Exception): - """ - Catching NotImplementedError is not RPython, so we use this custom class + """Catching NotImplementedError is not RPython, so we use this custom class instead of it """ # ____________________________________________________________ - def replace_crlf_with_lf(s): substrings = s.split("\r") result = [substrings[0]] @@ -206,7 +201,6 @@ class Stream(object): - """Base class for streams. Provides a default implementation of some methods.""" @@ -281,7 +275,6 @@ class DiskFile(Stream): - """Standard I/O basis stream using os.open/close/read/write/lseek""" def __init__(self, fd): @@ -361,7 +354,6 @@ # next class is not RPython class MMapFile(Stream): - """Standard I/O basis stream using mmap.""" def __init__(self, fd, mmapaccess): @@ -508,7 +500,6 @@ return intoffset class BufferingInputStream(Stream): - """Standard buffering input stream. This, and BufferingOutputStream if needed, are typically at the top of @@ -722,7 +713,6 @@ class BufferingOutputStream(Stream): - """Standard buffering output stream. This, and BufferingInputStream if needed, are typically at the top of @@ -780,7 +770,6 @@ class LineBufferingOutputStream(BufferingOutputStream): - """Line buffering output stream. This is typically the top of the stack. @@ -811,12 +800,9 @@ self.buf = [data[p:]] self.buflen = len(self.buf[0]) - # ____________________________________________________________ - class CRLFFilter(Stream): - """Filtering stream for universal newlines. TextInputFilter is more general, but this is faster when you don't @@ -846,7 +832,6 @@ flush_buffers=False) class TextCRLFFilter(Stream): - """Filtering stream for universal newlines. TextInputFilter is more general, but this is faster when you don't @@ -913,9 +898,8 @@ close1 = PassThrough("close1", flush_buffers=False) try_to_find_file_descriptor = PassThrough("try_to_find_file_descriptor", flush_buffers=False) - + class TextInputFilter(Stream): - """Filtering input stream for universal newline translation.""" def __init__(self, base): @@ -948,7 +932,7 @@ # CR separator or half of a CRLF separator. 
Neither will be marked # as seen, since you are waiting for your next read to determine # what you have seen. But there's no more to read ... - + if self.atcr: if data.startswith("\n"): data = data[1:] @@ -958,7 +942,7 @@ else: self.CR = True self.atcr = False - + for i in range(len(data)): if data[i] == '\n': if i > 0 and data[i-1] == '\r': @@ -968,11 +952,11 @@ elif data[i] == '\r': if i < len(data)-1 and data[i+1] != '\n': self.CR = True - + if "\r" in data: self.atcr = data.endswith("\r") data = replace_crlf_with_lf(data) - + return data def readline(self): @@ -1013,7 +997,7 @@ if self.atcr: # Must read the next byte to see if it's \n, # because then we must report the next position. - assert not self.buf + assert not self.buf self.buf = self.do_read(1) pos += 1 self.atcr = False @@ -1050,7 +1034,6 @@ class TextOutputFilter(Stream): - """Filtering output stream for universal newline translation.""" def __init__(self, base, linesep=os.linesep): @@ -1105,7 +1088,6 @@ # The following functions are _not_ RPython! class DecodingInputFilter(Stream): - """Filtering input stream that decodes an encoded file.""" def __init__(self, base, encoding="utf8", errors="strict"): @@ -1152,7 +1134,6 @@ flush_buffers=False) class EncodingOutputFilter(Stream): - """Filtering output stream that writes to an encoded file.""" def __init__(self, base, encoding="utf8", errors="strict"): From noreply at buildbot.pypy.org Thu Mar 6 00:05:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 6 Mar 2014 00:05:22 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20140305230522.A0DFB1C0290@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69741:f825f050bb2a Date: 2014-03-06 00:01 +0100 http://bitbucket.org/pypy/pypy/changeset/f825f050bb2a/ Log: hg merge default diff too long, truncating to 2000 out of 43920 lines diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,60 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded library. */ + +#ifdef __cplusplus +extern "C" { +#endif + +// call this first +void rpython_startup_code(void); + +// pypy_init_threads has to be called in case you want to use threads +void pypy_init_threads(void); + +/* Initialize the home directory of PyPy. It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. 
Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. + */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD + */ +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + +/* a similar function, but inside Python code it'll register + a magic argument c_argument as int, which will be passed as void* from C. + Useful for passing pointers to arbitrary structs that contain callbacks + to register */ +int pypy_execute_source_ptr(char *source, void* ptr); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float diff --git a/lib-python/2.7/test/test_audioop.py b/lib-python/2.7/test/test_audioop.py --- a/lib-python/2.7/test/test_audioop.py +++ b/lib-python/2.7/test/test_audioop.py @@ -1,6 +1,6 @@ import audioop import unittest -from test.test_support import run_unittest +from test.test_support import run_unittest, impl_detail endian = 'big' if audioop.getsample('\0\1', 2, 0) == 1 else 'little' @@ -93,21 +93,25 @@ wtd = len(d2)//3 self.assertEqual(len(audioop.lin2lin(d1, got, wtd)), len(d2)) + @impl_detail(pypy=False) def test_adpcm2lin(self): # Very cursory test self.assertEqual(audioop.adpcm2lin(b'\0\0', 1, None), (b'\0' * 4, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 2, None), (b'\0' * 8, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 4, None), (b'\0' * 16, (0,0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): # Very cursory test self.assertEqual(audioop.lin2adpcm('\0\0\0\0', 1, None), ('\0\0', (0,0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(data[0], 1), '\xd5\xc5\xf5') self.assertEqual(audioop.lin2alaw(data[1], 2), '\xd5\xd5\xd5') self.assertEqual(audioop.lin2alaw(data[2], 4), '\xd5\xd5\xd5') + @impl_detail(pypy=False) def test_alaw2lin(self): # Cursory d = audioop.lin2alaw(data[0], 1) @@ -123,11 +127,13 @@ self.assertEqual(audioop.alaw2lin(d, 4), b'\x00\x00\x08\x00\x00\x00\x08\x01\x00\x00\x10\x02') + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(data[0], 1), '\xff\xe7\xdb') self.assertEqual(audioop.lin2ulaw(data[1], 2), '\xff\xff\xff') self.assertEqual(audioop.lin2ulaw(data[2], 4), '\xff\xff\xff') + @impl_detail(pypy=False) def test_ulaw2lin(self): # Cursory d = audioop.lin2ulaw(data[0], 1) @@ -195,6 +201,7 @@ self.assertRaises(audioop.error, audioop.findmax, ''.join( chr(x) for x in 
xrange(256)), -2392392) + @impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -219,6 +226,7 @@ self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abc' state = None diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -129,9 +129,13 @@ fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) + return else: - self.fail("expected os.tmpfile() to raise OSError") - return + if test_support.check_impl_detail(pypy=False): + self.fail("expected os.tmpfile() to raise OSError") + # on PyPy, os.tmpfile() uses the tempfile module + # anyway, so works even if we cannot write in root. + fp.close() else: # open() worked, therefore, tmpfile() should work. Close our # dummy file and proceed with the test as normal. diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -244,22 +244,18 @@ if __debug__: self._note("%s.wait(): got it", self) else: - # Balancing act: We can't afford a pure busy loop, so we - # have to sleep; but if we sleep the whole timeout time, - # we'll be unresponsive. The scheme here sleeps very - # little at first, longer as time goes on, but never longer - # than 20 times per second (or the timeout time remaining). - endtime = _time() + timeout - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - gotit = waiter.acquire(0) - if gotit: - break - remaining = endtime - _time() - if remaining <= 0: - break - delay = min(delay * 2, remaining, .05) - _sleep(delay) + # PyPy patch: use _py3k_acquire() + if timeout > 0: + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. 
+ waiter.acquire() + gotit = True + else: + gotit = waiter.acquire(False) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="incomplete module"), + RegrTest('test_audioop.py'), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -1,5 +1,11 @@ +import __builtin__ as builtins +import math +import struct +from fractions import gcd +from ctypes import create_string_buffer -import struct + +_buffer = buffer class error(Exception): @@ -8,7 +14,7 @@ def _check_size(size): if size != 1 and size != 2 and size != 4: - raise error("Size should be 1, 2 or 4") + raise error("Size should be 1, 2 or 4") def _check_params(length, size): @@ -17,13 +23,524 @@ raise error("not a whole number of frames") +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = start + size + return struct.unpack_from(fmt, _buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: + return 0x7f + elif size == 1: + return 0xff + elif signed and size == 2: + return 0x7fff + elif size == 2: + return 0xffff + elif signed and size == 4: + return 0x7fffffff + elif size == 4: + return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: + return 0 + elif size == 1: + return -0x80 + elif size == 2: + return -0x8000 + elif size == 4: + return -0x80000000 + + +def _get_clipfn(size, signed=True): + maxval = _get_maxval(size, signed) + minval = _get_minval(size, signed) + return lambda val: builtins.max(min(val, maxval), minval) + + +def _overflow(val, size, signed=True): + minval = _get_minval(size, signed) + maxval = _get_maxval(size, signed) + if minval <= val <= maxval: + return val + + bits = size * 8 + if signed: + offset = 2**(bits-1) + return ((val + offset) % (2**bits)) - offset + else: + return val % (2**bits) + + def getsample(cp, size, i): _check_params(len(cp), size) if not (0 <= i < len(cp) / size): raise error("Index out of range") - if size == 1: - return struct.unpack_from("B", buffer(cp)[i:])[0] - elif size == 2: - return struct.unpack_from("H", buffer(cp)[i * 2:])[0] - elif size == 4: - return struct.unpack_from("I", buffer(cp)[i * 4:])[0] + return _get_sample(cp, size, i) + + +def max(cp, size): + _check_params(len(cp), size) + + if len(cp) == 0: + return 0 + + return builtins.max(abs(sample) for sample in _get_samples(cp, size)) + + +def minmax(cp, size): + _check_params(len(cp), size) + + max_sample, min_sample = 0, 0 + for sample in _get_samples(cp, size): + max_sample = builtins.max(sample, 
max_sample) + min_sample = builtins.min(sample, min_sample) + + return min_sample, max_sample + + +def avg(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + return sum(_get_samples(cp, size)) / sample_count + + +def rms(cp, size): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + + sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) + return int(math.sqrt(sum_squares / sample_count)) + + +def _sum2(cp1, cp2, length): + size = 2 + return sum(getsample(cp1, size, i) * getsample(cp2, size, i) + for i in range(length)) + + +def findfit(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0 or len(cp2) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) < len(cp2): + raise error("First sample should be longer") + + len1 = _sample_count(cp1, size) + len2 = _sample_count(cp2, size) + + sum_ri_2 = _sum2(cp2, cp2, len2) + sum_aij_2 = _sum2(cp1, cp1, len2) + sum_aij_ri = _sum2(cp1, cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + best_result = result + best_i = 0 + + for i in range(1, len1 - len2 + 1): + aj_m1 = _get_sample(cp1, size, i - 1) + aj_lm1 = _get_sample(cp1, size, i + len2 - 1) + + sum_aij_2 += aj_lm1**2 - aj_m1**2 + sum_aij_ri = _sum2(_buffer(cp1)[i*size:], cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + if result < best_result: + best_result = result + best_i = i + + factor = _sum2(_buffer(cp1)[best_i*size:], cp2, len2) / sum_ri_2 + + return best_i, factor + + +def findfactor(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) != len(cp2): + raise error("Samples should be same size") + + sample_count = _sample_count(cp1, size) + + sum_ri_2 = _sum2(cp2, cp2, sample_count) + sum_aij_ri = _sum2(cp1, cp2, sample_count) + + return sum_aij_ri / sum_ri_2 + + +def findmax(cp, len2): + size = 2 + sample_count = _sample_count(cp, size) + + if len(cp) % 2 != 0: + raise error("Strings should be even-sized") + + if len2 < 0 or sample_count < len2: + raise error("Input sample should be longer") + + if sample_count == 0: + return 0 + + result = _sum2(cp, cp, len2) + best_result = result + best_i = 0 + + for i in range(1, sample_count - len2 + 1): + sample_leaving_window = getsample(cp, size, i - 1) + sample_entering_window = getsample(cp, size, i + len2 - 1) + + result -= sample_leaving_window**2 + result += sample_entering_window**2 + + if result > best_result: + best_result = result + best_i = i + + return best_i + + +def avgpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + avg = 0 + nextreme = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + avg += abs(prevval - prevextreme) + nextreme += 1 + + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + if nextreme == 0: + return 0 + + return avg / nextreme + + +def maxpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + max = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, 
sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + extremediff = abs(prevval - prevextreme) + if extremediff > max: + max = extremediff + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + return max + + +def cross(cp, size): + _check_params(len(cp), size) + + crossings = 0 + last_sample = 0 + for sample in _get_samples(cp, size): + if sample <= 0 < last_sample or sample >= 0 > last_sample: + crossings += 1 + last_sample = sample + + return crossings + + +def mul(cp, size, factor): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = clip(int(sample * factor)) + _put_sample(result, size, i, sample) + + return result.raw + + +def tomono(cp, size, fac1, fac2): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) / 2) + + for i in range(0, sample_count, 2): + l_sample = getsample(cp, size, i) + r_sample = getsample(cp, size, i + 1) + + sample = (l_sample * fac1) + (r_sample * fac2) + sample = clip(sample) + + _put_sample(result, size, i / 2, sample) + + return result.raw + + +def tostereo(cp, size, fac1, fac2): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) * 2) + clip = _get_clipfn(size) + + for i in range(sample_count): + sample = _get_sample(cp, size, i) + + l_sample = clip(sample * fac1) + r_sample = clip(sample * fac2) + + _put_sample(result, size, i * 2, l_sample) + _put_sample(result, size, i * 2 + 1, r_sample) + + return result.raw + + +def add(cp1, cp2, size): + _check_params(len(cp1), size) + + if len(cp1) != len(cp2): + raise error("Lengths should be the same") + + clip = _get_clipfn(size) + sample_count = _sample_count(cp1, size) + result = create_string_buffer(len(cp1)) + + for i in range(sample_count): + sample1 = getsample(cp1, size, i) + sample2 = getsample(cp2, size, i) + + sample = clip(sample1 + sample2) + + _put_sample(result, size, i, sample) + + return result.raw + + +def bias(cp, size, bias): + _check_params(len(cp), size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = _overflow(sample + bias, size) + _put_sample(result, size, i, sample) + + return result.raw + + +def reverse(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp)) + for i, sample in enumerate(_get_samples(cp, size)): + _put_sample(result, size, sample_count - i - 1, sample) + + return result.raw + + +def lin2lin(cp, size, size2): + _check_params(len(cp), size) + _check_size(size2) + + if size == size2: + return cp + + new_len = (len(cp) / size) * size2 + + result = create_string_buffer(new_len) + + for i in range(_sample_count(cp, size)): + sample = _get_sample(cp, size, i) + if size < size2: + sample = sample << (4 * size2 / size) + elif size > size2: + sample = sample >> (4 * size / size2) + + sample = _overflow(sample, size2) + + _put_sample(result, size2, i, sample) + + return result.raw + + +def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0): + _check_params(len(cp), size) + if nchannels < 1: + raise error("# of channels should be >= 1") + + bytes_per_frame = size * nchannels + frame_count = len(cp) / bytes_per_frame + + if bytes_per_frame / nchannels 
!= size: + raise OverflowError("width * nchannels too big for a C int") + + if weightA < 1 or weightB < 0: + raise error("weightA should be >= 1, weightB should be >= 0") + + if len(cp) % bytes_per_frame != 0: + raise error("not a whole number of frames") + + if inrate <= 0 or outrate <= 0: + raise error("sampling rate not > 0") + + d = gcd(inrate, outrate) + inrate /= d + outrate /= d + + prev_i = [0] * nchannels + cur_i = [0] * nchannels + + if state is None: + d = -outrate + else: + d, samps = state + + if len(samps) != nchannels: + raise error("illegal state argument") + + prev_i, cur_i = zip(*samps) + prev_i, cur_i = list(prev_i), list(cur_i) + + q = frame_count / inrate + ceiling = (q + 1) * outrate + nbytes = ceiling * bytes_per_frame + + result = create_string_buffer(nbytes) + + samples = _get_samples(cp, size) + out_i = 0 + while True: + while d < 0: + if frame_count == 0: + samps = zip(prev_i, cur_i) + retval = result.raw + + # slice off extra bytes + trim_index = (out_i * bytes_per_frame) - len(retval) + retval = _buffer(retval)[:trim_index] + + return (retval, (d, tuple(samps))) + + for chan in range(nchannels): + prev_i[chan] = cur_i[chan] + cur_i[chan] = next(samples) + + cur_i[chan] = ( + (weightA * cur_i[chan] + weightB * prev_i[chan]) + / (weightA + weightB) + ) + + frame_count -= 1 + d += outrate + + while d >= 0: + for chan in range(nchannels): + cur_o = ( + (prev_i[chan] * d + cur_i[chan] * (outrate - d)) + / outrate + ) + _put_sample(result, size, out_i, _overflow(cur_o, size)) + out_i += 1 + d -= inrate + + +def lin2ulaw(cp, size): + raise NotImplementedError() + + +def ulaw2lin(cp, size): + raise NotImplementedError() + + +def lin2alaw(cp, size): + raise NotImplementedError() + + +def alaw2lin(cp, size): + raise NotImplementedError() + + +def lin2adpcm(cp, size, state): + raise NotImplementedError() + + +def adpcm2lin(cp, size, state): + raise NotImplementedError() diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.1" -__version_info__ = (0, 8, 1) +__version__ = "0.8.2" +__version_info__ = (0, 8, 2) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,4 @@ -import types +import sys, types from .lock import allocate_lock try: @@ -88,18 +88,20 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. 
""" if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override) + self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -387,22 +389,27 @@ return self._backend.from_handle(x) -def _make_ffi_library(ffi, libname, flags): - import os - name = libname +def _load_backend_lib(backend, name, flags): if name is None: - name = 'c' # on Posix only - backend = ffi._backend + if sys.platform != "win32": + return backend.load_library(None, flags) + name = "c" # Windows: load_library(None) fails, but this works + # (backward compatibility hack only) try: if '.' not in name and '/' not in name: raise OSError("library not found: %r" % (name,)) - backendlib = backend.load_library(name, flags) + return backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: raise # propagate the original OSError - backendlib = backend.load_library(path, flags) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + import os + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) copied_enums = [] # def make_accessor_locked(name): diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -720,7 +720,7 @@ return self._new_struct_or_union('union', name, ctypes.Union) def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, sflags=0): if totalsize >= 0 or totalalignment >= 0: raise NotImplementedError("the ctypes backend of CFFI does not support " "structures completed by verify(); please " @@ -739,6 +739,8 @@ else: cfields.append((fname, BField._ctype, bitsize)) bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 struct_or_union._fields_ = cfields CTypesStructOrUnion._bfield_types = bfield_types # diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -98,6 +98,7 @@ self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._override = False + self._packed = False def _parse(self, csource): csource, macros = _preprocess(csource) @@ -147,13 +148,16 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False): + def parse(self, csource, override=False, packed=False): prev_override = self._override + prev_packed = self._packed try: self._override = override + self._packed = packed self._internal_parse(csource) finally: self._override = prev_override + self._packed = prev_packed def _internal_parse(self, csource): ast, macros = self._parse(csource) @@ -476,6 +480,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) + tp.packed = self._packed return tp def _make_partial(self, tp, nested): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,4 +1,6 @@ +import types import weakref + from .lock import allocate_lock @@ -81,29 +83,29 @@ 'long': 'i', 'long long': 'i', 'signed char': 'i', - 'unsigned char': 'u', - 'unsigned short': 'u', - 
'unsigned int': 'u', - 'unsigned long': 'u', - 'unsigned long long': 'u', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', - '_Bool': 'u', + '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', - 'uint8_t': 'u', + 'uint8_t': 'i', 'int16_t': 'i', - 'uint16_t': 'u', + 'uint16_t': 'i', 'int32_t': 'i', - 'uint32_t': 'u', + 'uint32_t': 'i', 'int64_t': 'i', - 'uint64_t': 'u', + 'uint64_t': 'i', 'intptr_t': 'i', - 'uintptr_t': 'u', + 'uintptr_t': 'i', 'ptrdiff_t': 'i', - 'size_t': 'u', + 'size_t': 'i', 'ssize_t': 'i', } @@ -114,12 +116,8 @@ def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_signed_type(self): + def is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_unsigned_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'u' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] in 'iu' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' @@ -259,6 +257,7 @@ fixedlayout = None completed = False partial = False + packed = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -315,7 +314,11 @@ fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - ffi._backend.complete_struct_or_union(BType, lst, self) + sflags = 0 + if self.packed: + sflags = 8 # SF_PACKED + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, sflags) # else: fldtypes = [] @@ -468,8 +471,7 @@ # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class level if ffi._backend # is some instance. - ModuleType = type(weakref) - if isinstance(ffi._backend, ModuleType): + if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -214,10 +214,7 @@ extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': - if tp.is_signed_type(): - converter = '_cffi_to_c_SIGNED' - else: - converter = '_cffi_to_c_UNSIGNED' + converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) @@ -270,10 +267,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type(): - if tp.is_signed_type(): - return '_cffi_from_c_SIGNED(%s, %s)' % (var, tp.name) - else: - return '_cffi_from_c_UNSIGNED(%s, %s)' % (var, tp.name) + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -801,25 +795,23 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble -#define _cffi_from_c_SIGNED(x, type) \ - (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x)) -#define _cffi_from_c_UNSIGNED(x, type) \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? 
PyInt_FromLong(x) : \ + sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ + PyLong_FromUnsignedLongLong(x)) \ + : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ + PyLong_FromLongLong(x))) -#define _cffi_to_c_SIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_i8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_i16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_i32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_i64(o) : \ - (Py_FatalError("unsupported size for type " #type), 0)) -#define _cffi_to_c_UNSIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_u8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_u16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_u32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_u64(o) : \ +#define _cffi_to_c_int(o, type) \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ + : _cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ + : _cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ + : _cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ + : _cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -905,11 +897,13 @@ if (c_api_object == NULL) return; if (!PyCapsule_CheckExact(c_api_object)) { + Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); return; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + Py_DECREF(c_api_object); } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -250,14 +250,16 @@ IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), - BoolOption("optimized_int_add", - "special case the addition of two integers in BINARY_ADD", + BoolOption("intshortcut", + "special case addition and subtraction of two integers in BINARY_ADD/" + "/BINARY_SUBTRACT and their inplace counterparts", default=False), BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), BoolOption("builtinshortcut", - "a shortcut for operations between built-in types", + "a shortcut for operations between built-in types. XXX: " + "deprecated, not really a shortcut any more.", default=False), BoolOption("getattributeshortcut", "track types that override __getattribute__", @@ -301,7 +303,7 @@ config.objspace.std.suggest(withrangelist=True) config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(builtinshortcut=True) + config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) diff --git a/pypy/doc/config/objspace.std.intshortcut.txt b/pypy/doc/config/objspace.std.intshortcut.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.std.intshortcut.txt @@ -0,0 +1,2 @@ +Optimize the addition and subtraction of two integers. Enabling this +option gives small speedups. diff --git a/pypy/doc/config/objspace.std.optimized_int_add.txt b/pypy/doc/config/objspace.std.optimized_int_add.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.optimized_int_add.txt +++ /dev/null @@ -1,2 +0,0 @@ -Optimize the addition of two integers a bit. Enabling this option gives small -speedups. 
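The unified _cffi_to_c_int/_cffi_from_c_int macros above hinge on the classic C idiom ``((type)-1) > 0``: casting -1 to an unsigned type wraps around to that type's maximum value, so the comparison holds exactly for unsigned types, and because it is a compile-time constant the unused branch is folded away. A minimal standalone sketch of that idiom (a made-up demo program, not part of the changeset; the IS_UNSIGNED name is invented here):

    #include <stdio.h>

    /* Same compile-time check the macros perform: (type)-1 wraps to the
       maximum value for unsigned types and stays negative for signed ones. */
    #define IS_UNSIGNED(type)  (((type)-1) > 0)

    int main(void)
    {
        printf("int            -> %d\n", IS_UNSIGNED(int));            /* 0 */
        printf("unsigned int   -> %d\n", IS_UNSIGNED(unsigned int));   /* 1 */
        printf("signed char    -> %d\n", IS_UNSIGNED(signed char));    /* 0 */
        printf("unsigned short -> %d\n", IS_UNSIGNED(unsigned short)); /* 1 */
        return 0;
    }

This is why the previously separate SIGNED/UNSIGNED macro pairs could be collapsed into a single macro per direction without losing the signedness distinction.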
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/embedding.rst @@ -0,0 +1,174 @@ + +Embedding PyPy +-------------- + +PyPy has a very minimal and a very strange embedding interface, based on +the usage of `cffi`_ and the philosophy that Python is a better language than +C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ +project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API. + +The first thing that you need is to compile PyPy yourself with the option +``--shared``. We plan to make ``--shared`` the default in the future. Consult +the `how to compile PyPy`_ doc for details. This will result in a ``libpypy.so`` +or ``pypy.dll`` file or something similar, depending on your platform. Consult +your platform specification for details. + +The resulting shared library exports very few functions; however, they are +enough to accomplish everything you need, provided you follow a few principles. +The API is: + +.. function:: void rpython_startup_code(void); + + This is a function that you have to call (once) before calling anything else. + It initializes the RPython/PyPy GC and runs a bunch of necessary startup + code. This function cannot fail. + +.. function:: void pypy_init_threads(void); + + Initialize threads. It only needs to be called if there are any threads involved. + +.. function:: long pypy_setup_home(char* home, int verbose); + + This function searches the PyPy standard library starting from the given + "PyPy home directory". It is not strictly necessary to execute it before + running Python code, but without it you will not be able to import any + non-builtin module from the standard library. The arguments are: + + * ``home``: NULL-terminated path to an executable inside the pypy directory + (can be a .so name, can be made up) + + * ``verbose``: if non-zero, it will print error messages to stderr + + The function returns 0 on success or -1 on failure; it can be called multiple times + until the library is found. + +.. function:: int pypy_execute_source(char* source); + + Execute the Python source code given in the ``source`` argument. In case of + exceptions, it will print the Python traceback to stderr and return 1; + otherwise it returns 0. You should really do your own error handling in the + source. It'll acquire the GIL. + +.. function:: int pypy_execute_source_ptr(char* source, void* ptr); + + Just like the above, except it registers a magic argument in the source + scope as ``c_argument``, where ``void*`` is encoded as a Python int. + +.. function:: void pypy_thread_attach(void); + + In case your application uses threads that are initialized outside of PyPy, + you need to call this function to tell the PyPy GC to track this thread. + Note that this function is not thread-safe itself, so you need to guard it + with a mutex. + +Simple example +-------------- + +Note that this API is a lot more minimal than, say, the CPython C API, so at first +it's tempting to think that you can't do much. However, the trick is to do +all the logic in Python and expose it via `cffi`_ callbacks. Let's assume +we're on Linux and pypy is installed in ``/opt/pypy`` with the +library in ``/opt/pypy/bin/libpypy-c.so``. (It doesn't need to be +installed; you can also replace this path with your local checkout.) +We write a little C program: + +.. 
code-block:: c + + #include "include/PyPy.h" + #include <stdio.h> + + const char source[] = "print 'hello from pypy'"; + + int main() + { + int res; + + rpython_startup_code(); + // pypy_setup_home() is not needed in this trivial example + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + +If we save it as ``x.c`` now, compile it and run it with:: + + fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. + fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x + hello from pypy + +Worked! + +More advanced example +--------------------- + +Typically we need to do something more than simply execute source. The following +is a fully fledged example; please consult the cffi documentation for details. +It's a bit longish, but it captures the gist of what can be done with the PyPy +embedding interface: + +.. code-block:: c + + #include "include/PyPy.h" + #include <stdio.h> + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + c_func = ffi.cast('int(*)(int(*)(int))', c_argument)\n\ + c_func(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source_ptr(source, (void*)callback); + if (res) { + printf("Error calling pypy_execute_source_ptr!\n"); + } + return res; + } + +You can compile and run it with:: + + fijal at hermann:/opt/pypy$ gcc -g -o x x.c -lpypy-c -L. + fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +As you can see, we successfully managed to call Python from C and C from +Python. Now having one callback might not be enough, so what typically happens +is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` +and fill the structure from the Python side for future use. + +Threading +--------- + +In case you want to use pthreads, what you need to do is to call +``pypy_thread_attach`` from each of the threads that you created (but not +from the main thread) and call ``pypy_init_threads`` from the main thread. + +.. _`cffi`: http://cffi.readthedocs.org/ +.. _`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ +.. _`PyPy uwsgi plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html +.. _`how to compile PyPy`: getting-started.html diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -267,7 +267,7 @@ .. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html .. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html .. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -103,8 +103,7 @@ .. _`extension modules`: cpython_differences.html#extension-modules .. _`cpython differences`: cpython_differences.html -.. _`compatibility wiki`: -.. 
https://bitbucket.org/pypy/compatibility/wiki/Home +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ --------------------------------- @@ -244,7 +243,7 @@ discussions. .. _`contact us`: index.html -.. _`mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev ------------------------------------------------------------- OSError: ... cannot restore segment prot after reloc... Help? @@ -426,25 +425,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -315,27 +315,27 @@ .. _`trace example`: -Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +Tracing bytecodes ++++++++++++++++++ -You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +You can use a simple tracing mode to monitor the interpretation of +bytecodes. To enable it, set ``__pytrace__ = 1`` on the interactive +PyPy console:: >>>> __pytrace__ = 1 Tracing enabled - >>>> a = 1 + 2 - |- <<<< enter a = 1 + 2 @ 1 >>>> - |- 0 LOAD_CONST 0 (W_IntObject(1)) - |- 3 LOAD_CONST 1 (W_IntObject(2)) - |- 6 BINARY_ADD - |- add(W_IntObject(1), W_IntObject(2)) -> W_IntObject(3) - |- 7 STORE_NAME 0 (a) - |- hash(W_StringObject('a')) -> W_IntObject(-468864544) - |- int_w(W_IntObject(-468864544)) -> -468864544 - |-10 LOAD_CONST 2 () - |-13 RETURN_VALUE - |- <<<< leave a = 1 + 2 @ 1 >>>> + >>>> x = 5 + : LOAD_CONST 0 (5) + : STORE_NAME 0 (x) + : LOAD_CONST 1 (None) + : RETURN_VALUE 0 + >>>> x + : LOAD_NAME 0 (x) + : PRINT_EXPR 0 + 5 + : LOAD_CONST 0 (None) + : RETURN_VALUE 0 + >>>> Demos ------- @@ -386,7 +386,7 @@ .. _`full Python interpreter`: getting-started-python.html .. _`the blog`: http://morepypy.blogspot.com -.. _`pypy-dev mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. 
_`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html .. _`py library`: http://pylib.org diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -145,11 +145,13 @@ After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ + - `Embedding PyPy`_ - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html +.. _`Embedding PyPy`: embedding.html .. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html .. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html .. _`Look at our benchmark results`: http://speed.pypy.org diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -99,7 +99,7 @@ .. _`py-lib`: http://pylib.org/ .. _`py.test`: http://pytest.org/ .. _codespeak: http://codespeak.net/ -.. _`pypy-dev`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev Reports of 2006 diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -104,8 +104,8 @@ .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org .. _here: http://tismerysoft.de/pypy/irc-logs/pypy -.. _`Mercurial commit mailing list`: http://python.org/mailman/listinfo/pypy-commit -.. _`development mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit +.. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -19,10 +19,10 @@ Skip assert statements. -OO - Remove docstrings when importing modules in addition to -O. + Remove docstrings when importing modules in addition to ``-O``. --c *cmd* - Program passed in as CMD (terminates option list). +-c CMD + Program passed in as ``CMD`` (terminates option list). -S Do not ``import site`` on initialization. @@ -36,10 +36,10 @@ -h, --help Show a help message and exit. --m *mod* +-m MOD Library module to be run as a script (terminates option list). --W *arg* +-W ARG Warning control (*arg* is *action*:*message*:*category*:*module*:*lineno*). -E @@ -54,44 +54,9 @@ --info Print translation information about this PyPy executable. ---jit *arg* - Low level JIT parameters. Format is - *arg*\ ``=``\ *value*\ [``,``\ *arg*\ ``=``\ *value*\ ...] - - ``off`` - Disable the JIT. - - ``threshold=``\ *value* - Number of times a loop has to run for it to become hot. - - ``function_threshold=``\ *value* - Number of times a function must run for it to become traced from - start. - - ``inlining=``\ *value* - Inline python functions or not (``1``/``0``). 
- - ``loop_longevity=``\ *value* - A parameter controlling how long loops will be kept before being - freed, an estimate. - - ``max_retrace_guards=``\ *value* - Number of extra guards a retrace can cause. - - ``retrace_limit=``\ *value* - How many times we can try retracing before giving up. - - ``trace_eagerness=``\ *value* - Number of times a guard has to fail before we start compiling a - bridge. - - ``trace_limit=``\ *value* - Number of recorded operations before we abort tracing with - ``ABORT_TRACE_TOO_LONG``. - - ``enable_opts=``\ *value* - Optimizations to enabled or ``all``. - Warning, this option is dangerous, and should be avoided. +--jit ARG + Low level JIT parameters. Mostly internal. Run ``--jit help`` + for more information. ENVIRONMENT =========== @@ -144,7 +109,7 @@ Multiple prefixes can be specified, comma-separated. Only sections whose name match the prefix will be logged. - ``PYPYLOG``\ =\ ``jit-log-opt,jit-backend:``\ *logfile* will + ``PYPYLOG=jit-log-opt,jit-backend:logfile`` will generate a log suitable for *jitviewer*, a tool for debugging performance issues under PyPy. diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -11,7 +11,7 @@ `mailing list`_. This is simply for the reason that small possible projects tend to change very rapidly. -This list is mostly for having on overview on potential projects. This list is +This list is mostly for having an overview on potential projects. This list is by definition not exhaustive and we're pleased if people come up with their own improvement ideas. In any case, if you feel like working on some of those projects, or anything else in PyPy, pop up on IRC or write to us on the @@ -71,7 +71,7 @@ different ways to represent a unicode string, depending on whether the string fits into ASCII, has only two-byte characters or needs four-byte characters. -The actual details would be rather differen in PyPy, but we would like to have +The actual details would be rather different in PyPy, but we would like to have the same optimization implemented. Or maybe not. We can also play around with the idea of using a single @@ -142,32 +142,19 @@ * `hg` -Experiment (again) with LLVM backend for RPython compilation ------------------------------------------------------------- - -We already tried working with LLVM and at the time, LLVM was not mature enough -for our needs. It's possible that this has changed, reviving the LLVM backend -(or writing new from scratch) for static compilation would be a good project. - -(On the other hand, just generating C code and using clang might be enough. -The issue with that is the so-called "asmgcc GC root finder", which has tons -of issues of this own. In my opinion (arigo), it would be definitely a -better project to try to optimize the alternative, the "shadowstack" GC root -finder, which is nicely portable. So far it gives a pypy that is around -7% slower.) - -Embedding PyPy +Embedding PyPy and improving CFFI ---------------------------------------- Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ Being able to embed PyPy, say with its own limited C API, would be -useful. But here is the most interesting variant, straight from -EuroPython live discussion :-) We can have a generic "libpypy.so" that -can be used as a placeholder dynamic library, and when it gets loaded, -it runs a .py module that installs (via ctypes) the interface it wants -exported. 
This would give us a one-size-fits-all generic .so file to be -imported by any application that wants to load .so files :-) +useful. But there is a possibly better variant: use CFFI. With some +minimal tools atop CFFI, it would be possible to write a pure Python +library, and then compile automatically from it an .so/.dll file that is +a dynamic-link library with whatever C API we want. This gives us a +one-size-fits-all generic way to make .so/.dll files from Python. + +This would fit well in a "redesign CFFI" work. .. _`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -57,3 +57,45 @@ mapdicts keep track of whether or not an attribute is every assigned to multiple times. If it's only assigned once then an elidable lookup is used when possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! + +.. branch: NonConstant +Simplify implementation of NonConstant. + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. branch: optimize-int-and +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards + +.. branch: remove-intlong-smm +kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module + +.. branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -80,8 +80,9 @@ # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint + from rpython.rlib.entrypoint import entrypoint, RPython_StartupCode from rpython.rtyper.lltypesystem import rffi, lltype + from rpython.rtyper.lltypesystem.lloperation import llop w_pathsetter = space.appexec([], """(): def f(path): @@ -90,9 +91,10 @@ return f """) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) else: @@ -115,19 +117,38 @@ debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 + return -1 @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): + after = rffi.aroundstate.after + if after: after() source = rffi.charp2str(ll_source) - return _pypy_execute_source(source) + res = _pypy_execute_source(source) + before = rffi.aroundstate.before + if before: before() + return rffi.cast(rffi.INT, res) + + @entrypoint('main', [rffi.CCHARP, lltype.Signed], + c_name='pypy_execute_source_ptr') + def pypy_execute_source_ptr(ll_source, ll_ptr): + after = rffi.aroundstate.after + if after: after() + source = rffi.charp2str(ll_source) + space.setitem(w_globals, space.wrap('c_argument'), + space.wrap(ll_ptr)) + res = _pypy_execute_source(source) + before = rffi.aroundstate.before + if before: before() + return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): if not space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() @entrypoint('main', [], c_name='pypy_thread_attach') def pypy_thread_attach(): @@ -138,7 +159,8 @@ rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), @@ -153,10 +175,11 @@ debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 + return -1 return 0 return entry_point, {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, 'pypy_init_threads': pypy_init_threads, 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -1,11 +1,11 @@ """ Arguments objects. 
""" - -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.debug import make_sure_not_resized from rpython.rlib import jit +from pypy.interpreter.error import OperationError, oefmt + class Arguments(object): """ @@ -86,9 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - raise operationerrfmt( - space.w_TypeError, - "argument after * must be a sequence, not %T", w_stararg) + raise oefmt(space.w_TypeError, + "argument after * must be a sequence, not %T", + w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -113,10 +113,9 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - raise operationerrfmt( - space.w_TypeError, - "argument after ** must be a mapping, not %T", - w_starstararg) + raise oefmt(space.w_TypeError, + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) @@ -281,8 +280,7 @@ self._match_signature(w_firstarg, scope_w, signature, defaults_w, 0) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) return signature.scope_length() def _parse(self, w_firstarg, signature, defaults_w, blindargs=0): @@ -304,8 +302,7 @@ try: return self._parse(w_firstarg, signature, defaults_w, blindargs) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) @staticmethod def frompacked(space, w_args=None, w_kwds=None): @@ -344,10 +341,9 @@ for key in keywords: for otherkey in existingkeywords: if otherkey == key: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, existingkeywords): @@ -367,10 +363,9 @@ raise else: if existingkeywords and key in existingkeywords: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) keywords[i] = key keywords_w[i] = space.getitem(w_starstararg, w_key) i += 1 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,12 +1,18 @@ # Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -70,11 +76,13 @@ continue # field is optional w_obj = 
self.getdictvalue(space, missing) if w_obj is None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %s", + missing, host) else: - err = "incorrect type for field \"%s\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \"%s\" in %s", + missing, host) raise AssertionError("should not reach here") @@ -2793,7 +2801,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2834,7 +2842,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2879,7 +2887,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2922,7 +2930,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2967,7 +2975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2988,7 +2996,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3018,7 +3026,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3039,7 +3047,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) From noreply at buildbot.pypy.org Thu Mar 6 00:05:24 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 6 Mar 2014 00:05:24 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Fix import of lltype_to_annotation. Message-ID: <20140305230524.0C4AF1C0290@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69742:e1e709fd181c Date: 2014-03-06 00:04 +0100 http://bitbucket.org/pypy/pypy/changeset/e1e709fd181c/ Log: Fix import of lltype_to_annotation. 
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -6,7 +6,6 @@ from py.path import local from py.process import cmdexec -from rpython.annotator import model as annmodel from rpython.conftest import cdir from rpython.flowspace.model import mkentrymap, Constant, Variable from rpython.memory.gctransform.llvmgcroot import ( @@ -19,6 +18,7 @@ from rpython.rlib.objectmodel import (Symbolic, ComputedIntSymbolic, CDefinedIntSymbolic, malloc_zero_filled, running_on_llinterp) from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.lltypesystem import (llarena, llgroup, llmemory, lltype, rffi) from rpython.rtyper.lltypesystem.ll2ctypes import (_llvm_needs_header, @@ -1679,9 +1679,9 @@ main.c_name = 'main' mixlevelannotator = MixLevelHelperAnnotator(self.translator.rtyper) - arg1 = annmodel.lltype_to_annotation(rffi.INT) - arg2 = annmodel.lltype_to_annotation(rffi.CCHARPP) - res = annmodel.lltype_to_annotation(lltype.Signed) + arg1 = lltype_to_annotation(rffi.INT) + arg2 = lltype_to_annotation(rffi.CCHARPP) + res = lltype_to_annotation(lltype.Signed) graph = mixlevelannotator.getgraph(main, [arg1, arg2], res) mixlevelannotator.finish() mixlevelannotator.backend_optimize() From noreply at buildbot.pypy.org Thu Mar 6 01:01:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 6 Mar 2014 01:01:55 +0100 (CET) Subject: [pypy-commit] pypy default: update whatsnew Message-ID: <20140306000155.41FEF1C0290@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69743:506e1379b98c Date: 2014-03-05 19:00 -0500 http://bitbucket.org/pypy/pypy/changeset/506e1379b98c/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -99,3 +99,6 @@ Implements SimpleRangeListStrategy for case range(n) where n is a positive number. Makes some traces nicer by getting rid of multiplication for calculating loop counter and propagates that n > 0 further to get rid of guards. + +.. 
branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose From noreply at buildbot.pypy.org Thu Mar 6 02:32:36 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 02:32:36 +0100 (CET) Subject: [pypy-commit] pypy default: fix/py3k compat Message-ID: <20140306013236.808721C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69744:cf32f7a88ca2 Date: 2014-03-05 17:02 -0800 http://bitbucket.org/pypy/pypy/changeset/cf32f7a88ca2/ Log: fix/py3k compat diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -576,10 +576,13 @@ assert a == 9007199254740991 a = operator.truediv(x, 7) assert a == 9007199254740991.0 - exec("from __future__ import division; " - "a = x / 7; b = operator.truediv(x, 7)") - assert a == 9007199254740991.0 - assert b == 9007199254740991.0 + + def test_truediv_future(self): + ns = dict(x=63050394783186940) + exec("from __future__ import division; import operator; " + "a = x / 7; b = operator.truediv(x, 7)", ns) + assert ns['a'] == 9007199254740991.0 + assert ns['b'] == 9007199254740991.0 class AppTestIntShortcut(AppTestInt): From noreply at buildbot.pypy.org Thu Mar 6 02:32:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 02:32:37 +0100 (CET) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20140306013237.DD8981C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69745:de74625112ee Date: 2014-03-05 17:03 -0800 http://bitbucket.org/pypy/pypy/changeset/de74625112ee/ Log: merge upstream diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -99,3 +99,6 @@ Implements SimpleRangeListStrategy for case range(n) where n is a positive number. Makes some traces nicer by getting rid of multiplication for calculating loop counter and propagates that n > 0 further to get rid of guards. + +.. branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose diff --git a/pypy/interpreter/special.py b/pypy/interpreter/special.py --- a/pypy/interpreter/special.py +++ b/pypy/interpreter/special.py @@ -2,16 +2,10 @@ class Ellipsis(W_Root): - def __init__(self, space): - self.space = space - - def descr__repr__(self): - return self.space.wrap('Ellipsis') + def descr__repr__(self, space): + return space.wrap('Ellipsis') class NotImplemented(W_Root): - def __init__(self, space): - self.space = space - - def descr__repr__(self): - return self.space.wrap('NotImplemented') + def descr__repr__(self, space): + return space.wrap('NotImplemented') diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -71,8 +71,8 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_Ellipsis = special.Ellipsis(self) - self.w_NotImplemented = special.NotImplemented(self) + self.w_Ellipsis = special.Ellipsis() + self.w_NotImplemented = special.NotImplemented() def _freeze_(self): return True diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2328,6 +2328,16 @@ a[...] = 4 assert (a == [4, 4, 4]).all() + b = np.arange(24).reshape(2,3,4) + b[...] 
= 100 + assert (b == 100).all() + assert b.shape == (2, 3, 4) + b[...] = [10, 20, 30, 40] + assert (b[:,:,0] == 10).all() + assert (b[0,0,:] == [10, 20, 30, 40]).all() + assert b.shape == b[...].shape + assert (b == b[...]).all() + class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -58,8 +58,8 @@ self.w_None = W_NoneObject.w_None self.w_False = W_BoolObject.w_False self.w_True = W_BoolObject.w_True - self.w_NotImplemented = self.wrap(special.NotImplemented(self)) - self.w_Ellipsis = self.wrap(special.Ellipsis(self)) + self.w_NotImplemented = self.wrap(special.NotImplemented()) + self.w_Ellipsis = self.wrap(special.Ellipsis()) # types self.builtin_types = {} diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -431,7 +431,7 @@ intlist.find(w(4), 0, 2) -class AppTestW_ListObject(object): +class AppTestListObject(object): def setup_class(cls): import platform import sys @@ -525,6 +525,18 @@ l.__init__(assignment) assert l == list(assignment) + def test_range_init(self): + x = range(5,1) + assert x == [] + + x = range(1,10) + x[22:0:-1] == range(1,10) + + r = range(10, 10) + assert len(r) == 0 + assert list(reversed(r)) == [] + assert r[:] == [] + def test_extend_list(self): l = l0 = [1] l.extend([2]) @@ -609,24 +621,28 @@ def test_sort_key(self): def lower(x): return x.lower() l = ['a', 'C', 'b'] - l.sort(key = lower) + l.sort(key=lower) assert l == ['a', 'b', 'C'] l = [] - l.sort(key = lower) + l.sort(key=lower) assert l == [] - l = [ 'a' ] - l.sort(key = lower) - assert l == [ 'a' ] + l = ['a'] + l.sort(key=lower) + assert l == ['a'] + + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_sort_reversed(self): l = range(10) - l.sort(reverse = True) + l.sort(reverse=True) assert l == range(9, -1, -1) l = [] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [] l = [1] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [1] def test_sort_cmp_key_reverse(self): @@ -640,6 +656,17 @@ l.sort() assert l == ["a", "b", "c", "d"] + def test_sort_range(self): + l = range(3, 10, 3) + l.sort() + assert l == [3, 6, 9] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort() + assert l == [3, 6, 9] + def test_getitem(self): l = [1, 2, 3, 4, 5, 6, 9] assert l[0] == 1 @@ -663,6 +690,23 @@ l = [] raises(IndexError, "l[1]") + def test_getitem_range(self): + l = range(5) + raises(IndexError, "l[-6]") + raises(IndexError, "l[5]") + assert l[0] == 0 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-5] == 0 + + l = range(1, 5) + raises(IndexError, "l[-5]") + raises(IndexError, "l[4]") + assert l[0] == 1 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-4] == 1 + def test_setitem(self): l = [] raises(IndexError, "l[1] = 2") @@ -675,6 +719,10 @@ l[0] = "2" assert l == ["2",3] + l = range(3) + l[0] = 1 + assert l == [1,1,2] + def test_delitem(self): l = [1, 2, 3, 4, 5, 6, 9] del l[0] @@ -740,6 +788,29 @@ assert l[1:0:None] == [] assert l[1:0] == [] + def test_getslice_invalid(self): + x = [1,2,3,4] + assert x[10:0] == [] + assert x[10:0:None] == [] + + x = range(1,5) + assert x[10:0] == [] + assert x[10:0:None] == [] + + assert x[0:22] == [1,2,3,4] + assert x[-1:10] == 
[4] + + assert x[0:22:None] == [1,2,3,4] + assert x[-1:10:None] == [4] + + def test_getslice_range_backwards(self): + x = range(1,10) + assert x[22:-10] == [] + assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] + assert x[10:3:-1] == [9,8,7,6,5] + assert x[10:3:-2] == [9,7,5] + assert x[1:5:-1] == [] + def test_delall(self): l = l0 = [1,2,3] del l[:] @@ -777,6 +848,13 @@ l1 += [0] assert l1 == ['a', 'b', 'c', 0] + r1 = r2 = range(5) + assert r1 is r2 + r1 += [15] + assert r1 is r2 + assert r1 == [0, 1, 2, 3, 4, 15] + assert r2 == [0, 1, 2, 3, 4, 15] + def test_iadd_iterable(self): l = l0 = [1,2,3] l += iter([4,5]) @@ -835,6 +913,13 @@ l *= 2 assert l == [0, 1, 0, 1] + r1 = r2 = range(3) + assert r1 is r2 + r1 *= 2 + assert r1 is r2 + assert r1 == [0, 1, 2, 0, 1, 2] + assert r2 == [0, 1, 2, 0, 1, 2] + def test_mul_errors(self): try: [1, 2, 3] * (3,) @@ -916,6 +1001,11 @@ assert l == [] assert l is l0 + l = [] + l2 = range(3) + l.__setslice__(0,3,l2) + assert l == [0,1,2] + def test_assign_extended_slice(self): l = l0 = ['a', 'b', 'c'] l[::-1] = ['a', 'b', 'c'] @@ -1002,10 +1092,6 @@ l.append(x) assert l == range(5) - l = range(4) - l.append(4) - assert l == range(5) - l = [1,2,3] l.append("a") assert l == [1,2,3,"a"] @@ -1014,6 +1100,22 @@ l.append(4.4) assert l == [1.1, 2.2, 3.3, 4.4] + l = range(4) + l.append(4) + assert l == range(5) + + l = range(5) + l.append(26) + assert l == [0,1,2,3,4,26] + + l = range(5) + l.append("a") + assert l == [0,1,2,3,4,"a"] + + l = range(5) + l.append(5) + assert l == [0,1,2,3,4,5] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -1041,6 +1143,10 @@ l.insert(0,"a") assert l == ["a", 1, 2, 3] + l = range(3) + l.insert(1,5) + assert l == [0,5,1,2] + def test_pop(self): c = list('hello world') s = '' @@ -1053,6 +1159,7 @@ l = range(10) l.pop() assert l == range(9) + assert l.pop(0) == 0 l = [1.1, 2.2, 3.3] l.pop() @@ -1123,6 +1230,16 @@ c.reverse() assert ''.join(c) == 'dlrow olleh' + l = range(3) + l.reverse() + assert l == [2,1,0] + + r = range(3) + r[0] = 1 + assert r == [1, 1, 2] + r.reverse() + assert r == [2, 1, 1] + def test_reversed(self): assert list(list('hello').__reversed__()) == ['o', 'l', 'l', 'e', 'h'] assert list(reversed(list('hello'))) == ['o', 'l', 'l', 'e', 'h'] @@ -1387,106 +1504,27 @@ # assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + def test_no_len_on_range_iter(self): + iterable = range(10) + raises(TypeError, len, iter(iterable)) -class AppTestForRangeLists(AppTestW_ListObject): - spaceconfig = {"objspace.std.withrangelist": True} - - def test_range_simple_backwards(self): - x = range(5,1) - assert x == [] - - def test_range_big_start(self): - x = range(1,10) - x[22:0:-1] == range(1,10) - - def test_range_list_invalid_slice(self): - x = [1,2,3,4] - assert x[10:0] == [] - assert x[10:0:None] == [] - - x = range(1,5) - assert x[10:0] == [] - assert x[10:0:None] == [] - - assert x[0:22] == [1,2,3,4] - assert x[-1:10] == [4] - - assert x[0:22:None] == [1,2,3,4] - assert x[-1:10:None] == [4] - - def test_range_backwards(self): - x = range(1,10) - assert x[22:-10] == [] - assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] - assert x[10:3:-1] == [9,8,7,6,5] - assert x[10:3:-2] == [9,7,5] - assert x[1:5:-1] == [] - - def test_sort_range(self): - l = range(3,10,3) - l.sort() - assert l == [3, 6, 9] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort(reverse = True) - assert l == [9, 6, 3] - l.sort() - assert l == [3, 6, 9] - - def test_slice(self): - l = [] - l2 = range(3) - l.__setslice__(0,3,l2) - assert l == [0,1,2] - - 
def test_getitem_range(self): - l = range(5) - raises(IndexError, "l[-6]") - raises(IndexError, "l[5]") - assert l[0] == 0 - assert l[-1] == 4 - assert l[-2] == 3 - assert l[-5] == 0 - - l = range(1, 5) - raises(IndexError, "l[-5]") - raises(IndexError, "l[4]") - assert l[0] == 1 - assert l[-1] == 4 - assert l[-2] == 3 - assert l[-4] == 1 - - def test_append(self): - l = range(5) - l.append(26) - assert l == [0,1,2,3,4,26] - - l = range(5) - l.append("a") - assert l == [0,1,2,3,4,"a"] - - l = range(5) - l.append(5) - assert l == [0,1,2,3,4,5] - - def test_pop(self): - l = range(3) - assert l.pop(0) == 0 - - def test_setitem(self): - l = range(3) - l[0] = 1 - assert l == [1,1,2] - - def test_inset(self): - l = range(3) - l.insert(1,5) - assert l == [0,5,1,2] - - def test_reverse(self): - l = range(3) - l.reverse() - assert l == [2,1,0] + def test_reduce(self): + if self.on_cpython: + skip("cpython raises TypeError") # XXX investigate + it = iter(range(10)) + assert it.next() == 0 + assert it.next() == 1 + assert it.next() == 2 + assert it.next() == 3 + seqiter_new, args = it.__reduce__() + assert it.next() == 4 + assert it.next() == 5 + it2 = seqiter_new(*args) + assert it2.next() == 4 + assert it2.next() == 5 + it3 = seqiter_new(*args) + assert it3.next() == 4 + assert it3.next() == 5 def test_issue1266(self): l = range(1) @@ -1518,7 +1556,114 @@ assert item11 in l[::11] -class AppTestWithoutStrategies(object): +class AppTestListObjectWithRangeList(AppTestListObject): + """Run the list object tests with range lists enabled. Tests should go in + AppTestListObject so they can be run -A against CPython as well. + """ + spaceconfig = {"objspace.std.withrangelist": True} + + +class AppTestRangeListForcing: + """Tests for range lists that test forcing. Regular tests should go in + AppTestListObject so they can be run -A against CPython as well. Separate + from AppTestListObjectWithRangeList so we don't silently overwrite tests + with the same names. 
+ """ + spaceconfig = {"objspace.std.withrangelist": True} + + def setup_class(cls): + if cls.runappdirect: + py.test.skip("__pypy__.internal_repr() cannot be used to see " + "if a range list was forced on top of pypy-c") + cls.w_not_forced = cls.space.appexec([], """(): + import __pypy__ + def f(r): + return (isinstance(r, list) and + "RangeListStrategy" in __pypy__.internal_repr(r)) + return f + """) + + def test_simple(self): + result = [] + r = range(1, 8, 2) + for i in r: + result.append(i) + assert result == [1, 3, 5, 7] + assert self.not_forced(r) + + def test_getitem_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[10:15]: + result.append(i) + assert result == [21, 23, 25, 27, 29] + assert not self.not_forced(r) + + def test_getitem_extended_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[40:30:-2]: + result.append(i) + assert result == [81, 77, 73, 69, 65] + assert not self.not_forced(r) + + def test_repr(self): + r = range(5) + assert repr(r) == "[0, 1, 2, 3, 4]" + assert self.not_forced(r) + + def test_force(self): + r = range(10) + r[0] = 42 + assert not self.not_forced(r) + assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + def test_reverse(self): + r = range(10) + r.reverse() + assert not self.not_forced(r) + assert r == range(9, -1, -1) + + def test_pop(self): + # RangeListStrategy + r = range(1, 10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + assert repr(r) == repr(range(1, 9)) + res = r.pop(0) + assert res == 1 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 9)) + res = r.pop(len(r) - 1) + assert res == 8 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 8)) + res = r.pop(2) + assert res == 4 + assert not self.not_forced(r) + assert r == [2, 3, 5, 6, 7] + res = r.pop(2) + assert res == 5 + assert not self.not_forced(r) + assert r == [2, 3, 6, 7] + + # SimpleRangeListStrategy + r = range(10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + res = r.pop() + assert res == 8 + assert repr(r) == repr(range(8)) + assert self.not_forced(r) + res = r.pop(0) + assert res == 0 + assert not self.not_forced(r) + assert r == [1, 2, 3, 4, 5, 6, 7] + + +class AppTestWithoutStrategies: spaceconfig = {"objspace.std.withliststrategies": False} def test_no_shared_empty_list(self): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -6,8 +6,8 @@ from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject + class TestW_ListStrategies(TestW_ListObject): - def test_check_strategy(self): space = self.space w = space.wrap @@ -236,7 +236,6 @@ l.setslice(0, 1, 2, make_range_list(space, 5, 1, 4)) assert isinstance(l.strategy, IntegerListStrategy) - def test_setslice_List(self): space = self.space @@ -705,7 +704,6 @@ w_l2.sort(False) assert space.eq_w(w_l, w_l2) - def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) diff --git a/pypy/objspace/std/test/test_rangeobject.py b/pypy/objspace/std/test/test_rangeobject.py deleted file mode 100644 --- a/pypy/objspace/std/test/test_rangeobject.py +++ /dev/null @@ -1,156 +0,0 @@ -import py - - -class AppTestRangeListObject(object): - spaceconfig = {"objspace.std.withrangelist": True} - - def setup_class(cls): - if cls.runappdirect: - py.test.skip("__pypy__.internal_repr() cannot be used to see " - "if a 
range list was forced on top of pypy-c") - cls.w_not_forced = cls.space.appexec([], """(): - import __pypy__ - def f(r): - return (isinstance(r, list) and - "RangeListStrategy" in __pypy__.internal_repr(r)) - return f - """) - cls.w_SORT_FORCES_LISTS = cls.space.wrap(False) - - def test_simple(self): - result = [] - r = range(1, 8, 2) - for i in r: - result.append(i) - assert result == [1, 3, 5, 7] - assert self.not_forced(r) - - def test_getitem_simple(self): - r = range(4) - assert r[-1] == 3 - assert r[3] == 3 - assert r[-4] == 0 - raises(IndexError, r.__getitem__, -5) - raises(IndexError, r.__getitem__, 4) - - def test_getitem_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[10:15]: - result.append(i) - assert result == [21, 23, 25, 27, 29] - assert not self.not_forced(r) - - def test_getitem_extended_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[40:30:-2]: - result.append(i) - assert result == [81, 77, 73, 69, 65] - assert not self.not_forced(r) - - def test_empty_range(self): - r = range(10, 10) - assert len(r) == 0 - assert list(reversed(r)) == [] - assert r[:] == [] - - def test_repr(self): - r = range(5) - assert repr(r) == "[0, 1, 2, 3, 4]" - assert self.not_forced(r) - - def test_force(self): - r = range(10) - r[0] = 42 - assert not self.not_forced(r) - assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] - - def test_reverse(self): - r = range(10) - r.reverse() - assert not self.not_forced(r) - assert r == range(9, -1, -1) - r = range(3) - r[0] = 1 - assert r == [1, 1, 2] - r.reverse() - assert r == [2, 1, 1] - - r = range(10) - r.sort(key=lambda x: -x) - assert r == range(9, -1, -1) - - def test_pop(self): - # RangeListStrategy - r = range(1, 10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 9)) - res = r.pop(0) - assert res == 1 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 9)) - res = r.pop(len(r) - 1) - assert res == 8 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 8)) - res = r.pop(2) - assert res == 4 - assert not self.not_forced(r) - assert r == [2, 3, 5, 6, 7] - res = r.pop(2) - assert res == 5 - assert not self.not_forced(r) - assert r == [2, 3, 6, 7] - - # SimpleRangeListStrategy - r = range(10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - res = r.pop() - assert res == 8 - assert repr(r) == repr(range(8)) - assert self.not_forced(r) - res = r.pop(0) - assert res == 0 - assert not self.not_forced(r) - assert r == [1, 2, 3, 4, 5, 6, 7] - - def test_reduce(self): - it = iter(range(10)) - assert it.next() == 0 - assert it.next() == 1 - assert it.next() == 2 - assert it.next() == 3 - seqiter_new, args = it.__reduce__() - assert it.next() == 4 - assert it.next() == 5 - it2 = seqiter_new(*args) - assert it2.next() == 4 - assert it2.next() == 5 - it3 = seqiter_new(*args) - assert it3.next() == 4 - assert it3.next() == 5 - - def test_no_len_on_range_iter(self): - iterable = range(10) - raises(TypeError, len, iter(iterable)) - - def test_inplace_add(self): - r1 = r2 = range(5) - assert r1 is r2 - r1 += [15] - assert r1 is r2 - assert r1 == [0, 1, 2, 3, 4, 15] - assert r2 == [0, 1, 2, 3, 4, 15] - - def test_inplace_mul(self): - r1 = r2 = range(3) - assert r1 is r2 - r1 *= 2 - assert r1 is r2 - assert r1 == [0, 1, 2, 0, 1, 2] - assert r2 == [0, 1, 2, 0, 1, 2] From noreply at buildbot.pypy.org Thu Mar 6 02:32:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 02:32:42 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default 
Message-ID: <20140306013242.48C7D1C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69746:af06b063d0b8 Date: 2014-03-05 17:02 -0800 http://bitbucket.org/pypy/pypy/changeset/af06b063d0b8/ Log: merge default diff too long, truncating to 2000 out of 27503 lines diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -8,10 +8,11 @@ extern "C" { #endif - -/* You should call this first once. */ +// call this first void rpython_startup_code(void); +// pypy_init_threads has to be called in case you want to use threads +void pypy_init_threads(void); /* Initialize the home directory of PyPy. It is necessary to call this. @@ -26,11 +27,10 @@ /* If your program has multiple threads, then you need to call - pypy_init_threads() once at init time, and then pypy_thread_attach() - once in each other thread that just started and in which you want to - run Python code (including via callbacks, see below). + pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD */ -void pypy_init_threads(void); void pypy_thread_attach(void); @@ -46,6 +46,12 @@ */ int pypy_execute_source(char *source); +/* a similar function, but inside Python code it'll register + a magic argument c_argument as int, which will be passed as void* from C. + Useful for passing pointers to arbitrary structs that contain callbacks + to register */ +int pypy_execute_source_ptr(char *source, void* ptr); + #ifdef __cplusplus } diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -129,9 +129,13 @@ fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) + return else: - self.fail("expected os.tmpfile() to raise OSError") - return + if test_support.check_impl_detail(pypy=False): + self.fail("expected os.tmpfile() to raise OSError") + # on PyPy, os.tmpfile() uses the tempfile module + # anyway, so works even if we cannot write in root. + fp.close() else: # open() worked, therefore, tmpfile() should work. Close our # dummy file and proceed with the test as normal. diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -244,22 +244,18 @@ if __debug__: self._note("%s.wait(): got it", self) else: - # Balancing act: We can't afford a pure busy loop, so we - # have to sleep; but if we sleep the whole timeout time, - # we'll be unresponsive. The scheme here sleeps very - # little at first, longer as time goes on, but never longer - # than 20 times per second (or the timeout time remaining). 
- endtime = _time() + timeout - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - gotit = waiter.acquire(0) - if gotit: - break - remaining = endtime - _time() - if remaining <= 0: - break - delay = min(delay * 2, remaining, .05) - _sleep(delay) + # PyPy patch: use _py3k_acquire() + if timeout > 0: + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. + waiter.acquire() + gotit = True + else: + gotit = waiter.acquire(False) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.1" -__version_info__ = (0, 8, 1) +__version__ = "0.8.2" +__version_info__ = (0, 8, 2) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,4 @@ -import types +import sys, types from .lock import allocate_lock try: @@ -88,18 +88,20 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. """ if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override) + self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -387,22 +389,27 @@ return self._backend.from_handle(x) -def _make_ffi_library(ffi, libname, flags): - import os - name = libname +def _load_backend_lib(backend, name, flags): if name is None: - name = 'c' # on Posix only - backend = ffi._backend + if sys.platform != "win32": + return backend.load_library(None, flags) + name = "c" # Windows: load_library(None) fails, but this works + # (backward compatibility hack only) try: if '.' 
not in name and '/' not in name: raise OSError("library not found: %r" % (name,)) - backendlib = backend.load_library(name, flags) + return backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: raise # propagate the original OSError - backendlib = backend.load_library(path, flags) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + import os + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) copied_enums = [] # def make_accessor_locked(name): diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -720,7 +720,7 @@ return self._new_struct_or_union('union', name, ctypes.Union) def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, sflags=0): if totalsize >= 0 or totalalignment >= 0: raise NotImplementedError("the ctypes backend of CFFI does not support " "structures completed by verify(); please " @@ -739,6 +739,8 @@ else: cfields.append((fname, BField._ctype, bitsize)) bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 struct_or_union._fields_ = cfields CTypesStructOrUnion._bfield_types = bfield_types # diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -98,6 +98,7 @@ self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._override = False + self._packed = False def _parse(self, csource): csource, macros = _preprocess(csource) @@ -147,13 +148,16 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False): + def parse(self, csource, override=False, packed=False): prev_override = self._override + prev_packed = self._packed try: self._override = override + self._packed = packed self._internal_parse(csource) finally: self._override = prev_override + self._packed = prev_packed def _internal_parse(self, csource): ast, macros = self._parse(csource) @@ -476,6 +480,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) + tp.packed = self._packed return tp def _make_partial(self, tp, nested): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,4 +1,6 @@ +import types import weakref + from .lock import allocate_lock @@ -81,29 +83,29 @@ 'long': 'i', 'long long': 'i', 'signed char': 'i', - 'unsigned char': 'u', - 'unsigned short': 'u', - 'unsigned int': 'u', - 'unsigned long': 'u', - 'unsigned long long': 'u', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', - '_Bool': 'u', + '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', - 'uint8_t': 'u', + 'uint8_t': 'i', 'int16_t': 'i', - 'uint16_t': 'u', + 'uint16_t': 'i', 'int32_t': 'i', - 'uint32_t': 'u', + 'uint32_t': 'i', 'int64_t': 'i', - 'uint64_t': 'u', + 'uint64_t': 'i', 'intptr_t': 'i', - 'uintptr_t': 'u', + 'uintptr_t': 'i', 'ptrdiff_t': 'i', - 'size_t': 'u', + 'size_t': 'i', 'ssize_t': 'i', } @@ -114,12 +116,8 @@ def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_signed_type(self): + def 
is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_unsigned_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'u' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] in 'iu' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' @@ -259,6 +257,7 @@ fixedlayout = None completed = False partial = False + packed = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -315,7 +314,11 @@ fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - ffi._backend.complete_struct_or_union(BType, lst, self) + sflags = 0 + if self.packed: + sflags = 8 # SF_PACKED + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, sflags) # else: fldtypes = [] @@ -468,8 +471,7 @@ # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class level if ffi._backend # is some instance. - ModuleType = type(weakref) - if isinstance(ffi._backend, ModuleType): + if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -214,10 +214,7 @@ extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': - if tp.is_signed_type(): - converter = '_cffi_to_c_SIGNED' - else: - converter = '_cffi_to_c_UNSIGNED' + converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) @@ -270,10 +267,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type(): - if tp.is_signed_type(): - return '_cffi_from_c_SIGNED(%s, %s)' % (var, tp.name) - else: - return '_cffi_from_c_UNSIGNED(%s, %s)' % (var, tp.name) + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -801,25 +795,23 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble -#define _cffi_from_c_SIGNED(x, type) \ - (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x)) -#define _cffi_from_c_UNSIGNED(x, type) \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ + sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ + PyLong_FromUnsignedLongLong(x)) \ + : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ + PyLong_FromLongLong(x))) -#define _cffi_to_c_SIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_i8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_i16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_i32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_i64(o) : \ - (Py_FatalError("unsupported size for type " #type), 0)) -#define _cffi_to_c_UNSIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_u8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_u16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_u32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_u64(o) : \ +#define _cffi_to_c_int(o, type) \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? 
_cffi_to_c_u8(o) \ + : _cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ + : _cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ + : _cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ + : _cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -905,11 +897,13 @@ if (c_api_object == NULL) return; if (!PyCapsule_CheckExact(c_api_object)) { + Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); return; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + Py_DECREF(c_api_object); } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/disassembler.py b/lib_pypy/disassembler.py deleted file mode 100644 --- a/lib_pypy/disassembler.py +++ /dev/null @@ -1,302 +0,0 @@ -"""Disassembler of Python byte code into mnemonics. - -Comes from standard library, modified for the purpose of having a structured -view on things -""" - -from __future__ import print_function -import sys -import types -import inspect - -from opcode import * -from opcode import __all__ as _opcodes_all - -__all__ = ["dis","disassemble","distb","disco"] + _opcodes_all -del _opcodes_all - -class Opcode(object): - """ An abstract base class for all opcode implementations - """ - def __init__(self, pos, lineno, arg=None, argstr=''): - self.pos = pos - self.arg = arg - self.argstr = argstr - self.lineno = lineno - self.line_starts_here = False - - def __str__(self): - if self.arg is None: - return "%s" % (self.__class__.__name__,) - return "%s (%s)" % (self.__class__.__name__, self.arg) - - def __repr__(self): - if self.arg is None: - return "<%s at %d>" % (self.__class__.__name__, self.pos) - return "<%s (%s) at %d>" % (self.__class__.__name__, self.arg, self.pos) - -class CodeRepresentation(object): - """ Representation of opcodes - """ - def __init__(self, opcodes, co, source): - self.opcodes = opcodes - self.co = co - self.map = {} - current_lineno = None - for opcode in opcodes: - self.map[opcode.pos] = opcode - if opcode.lineno != current_lineno: - opcode.line_starts_here = True - current_lineno = opcode.lineno - self.source = source.split("\n") - -def _setup(): - for opcode in opname: - if not opcode.startswith('<'): - class O(Opcode): - pass - opcode = opcode.replace('+', '_') - O.__name__ = opcode - globals()[opcode] = O - -_setup() - -def dis(x=None): - """Disassemble classes, methods, functions, or code. - - With no argument, disassemble the last traceback. 
- - """ - if x is None: - distb() - return - if type(x) is types.InstanceType: - x = x.__class__ - if hasattr(x, 'im_func'): - x = x.__func__ - if hasattr(x, 'func_code'): - x = x.__code__ - if hasattr(x, '__dict__'): - xxx - items = sorted(x.__dict__.items()) - for name, x1 in items: - if type(x1) in (types.MethodType, - types.FunctionType, - types.CodeType, - type): - print("Disassembly of %s:" % name) - try: - dis(x1) - except TypeError as msg: - print("Sorry:", msg) - print() - elif hasattr(x, 'co_code'): - return disassemble(x) - elif isinstance(x, str): - return disassemble_string(x) - else: - raise TypeError("don't know how to disassemble %s objects" % \ - type(x).__name__) - -def distb(tb=None): - """Disassemble a traceback (default: last traceback).""" - if tb is None: - try: - tb = sys.last_traceback - except AttributeError: - raise RuntimeError("no last traceback to disassemble") - while tb.tb_next: tb = tb.tb_next - disassemble(tb.tb_frame.f_code, tb.tb_lasti) - -def disassemble(co, lasti=-1): - """Disassemble a code object.""" - source = inspect.getsource(co) - code = co.co_code - labels = findlabels(code) - linestarts = dict(findlinestarts(co)) - n = len(code) - i = 0 - extended_arg = 0 - free = None - res = [] - lastline = co.co_firstlineno - while i < n: - c = code[i] - op = ord(c) - if i in linestarts: - lastline = linestarts[i] - - #if i == lasti: - # xxx - # print '-->', - #else: - # xxx - # print ' ', - #if i in labels: - # xxx - # print '>>', - #else: - # xxx - # print ' ', - #xxx - pos = i - i = i + 1 - if op >= HAVE_ARGUMENT: - oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg - opargstr = str(oparg) - extended_arg = 0 - i = i+2 - if op == EXTENDED_ARG: - extended_arg = oparg*65536 - if op in hasconst: - opargstr = repr(co.co_consts[oparg]) - elif op in hasname: - opargstr = co.co_names[oparg] - elif op in hasjrel: - opargstr = 'to ' + repr(i + oparg) - elif op in haslocal: - opargstr = co.co_varnames[oparg] - elif op in hascompare: - opargstr = cmp_op[oparg] - elif op in hasfree: - if free is None: - free = co.co_cellvars + co.co_freevars - opargstr = free[oparg] - else: - oparg = None - opargstr = '' - opcls = globals()[opname[op].replace('+', '_')] - res.append(opcls(pos, lastline, oparg, opargstr)) - return CodeRepresentation(res, co, source) - -def disassemble_string(code, lasti=-1, varnames=None, names=None, - constants=None): - labels = findlabels(code) - n = len(code) - i = 0 - while i < n: - c = code[i] - op = ord(c) - if i == lasti: - xxx - print('-->', end=' ') - else: - xxx - print(' ', end=' ') - if i in labels: - xxx - print('>>', end=' ') - else: - xxx - print(' ', end=' ') - xxxx - print(repr(i).rjust(4), end=' ') - print(opname[op].ljust(15), end=' ') - i = i+1 - if op >= HAVE_ARGUMENT: - oparg = ord(code[i]) + ord(code[i+1])*256 - i = i+2 - xxx - print(repr(oparg).rjust(5), end=' ') - if op in hasconst: - if constants: - xxx - print('(' + repr(constants[oparg]) + ')', end=' ') - else: - xxx - print('(%d)'%oparg, end=' ') - elif op in hasname: - if names is not None: - xxx - print('(' + names[oparg] + ')', end=' ') - else: - xxx - print('(%d)'%oparg, end=' ') - elif op in hasjrel: - xxx - print('(to ' + repr(i + oparg) + ')', end=' ') - elif op in haslocal: - if varnames: - xxx - print('(' + varnames[oparg] + ')', end=' ') - else: - xxx - print('(%d)' % oparg, end=' ') - elif op in hascompare: - xxx - print('(' + cmp_op[oparg] + ')', end=' ') - xxx - print() - -disco = disassemble # XXX For backwards compatibility - -def findlabels(code): - 
"""Detect all offsets in a byte code which are jump targets. - - Return the list of offsets. - - """ - labels = [] - n = len(code) - i = 0 - while i < n: - c = code[i] - op = ord(c) - i = i+1 - if op >= HAVE_ARGUMENT: - oparg = ord(code[i]) + ord(code[i+1])*256 - i = i+2 - label = -1 - if op in hasjrel: - label = i+oparg - elif op in hasjabs: - label = oparg - if label >= 0: - if label not in labels: - labels.append(label) - return labels - -def findlinestarts(code): - """Find the offsets in a byte code which are start of lines in the source. - - Generate pairs (offset, lineno) as described in Python/compile.c. - - """ - byte_increments = [ord(c) for c in code.co_lnotab[0::2]] - line_increments = [ord(c) for c in code.co_lnotab[1::2]] - - lastlineno = None - lineno = code.co_firstlineno - addr = 0 - for byte_incr, line_incr in zip(byte_increments, line_increments): - if byte_incr: - if lineno != lastlineno: - yield (addr, lineno) - lastlineno = lineno - addr += byte_incr - lineno += line_incr - if lineno != lastlineno: - yield (addr, lineno) - -def _test(): - """Simple test program to disassemble a file.""" - if sys.argv[1:]: - if sys.argv[2:]: - sys.stderr.write("usage: python dis.py [-|file]\n") - sys.exit(2) - fn = sys.argv[1] - if not fn or fn == "-": - fn = None - else: - fn = None - if fn is None: - f = sys.stdin - else: - f = open(fn) - source = f.read() - if fn is not None: - f.close() - else: - fn = "" - code = compile(source, fn, "exec") - dis(code) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/embedding.rst @@ -0,0 +1,174 @@ + +Embedding PyPy +-------------- + +PyPy has a very minimal and a very strange embedding interface, based on +the usage of `cffi`_ and the philosophy that Python is a better language than +C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ +project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API. + +The first thing that you need is to compile PyPy yourself with the option +``--shared``. We plan to make ``--shared`` the default in the future. Consult +the `how to compile PyPy`_ doc for details. This will result in ``libpypy.so`` +or ``pypy.dll`` file or something similar, depending on your platform. Consult +your platform specification for details. + +The resulting shared library exports very few functions, however they are +enough to accomplish everything you need, provided you follow a few principles. +The API is: + +.. function:: void rpython_startup_code(void); + + This is a function that you have to call (once) before calling anything else. + It initializes the RPython/PyPy GC and does a bunch of necessary startup + code. This function cannot fail. + +.. function:: void pypy_init_threads(void); + + Initialize threads. Only need to be called if there are any threads involved + +.. function:: long pypy_setup_home(char* home, int verbose); + + This function searches the PyPy standard library starting from the given + "PyPy home directory". It is not strictly necessary to execute it before + running Python code, but without it you will not be able to import any + non-builtin module from the standard library. The arguments are: + + * ``home``: NULL terminated path to an executable inside the pypy directory + (can be a .so name, can be made up) + + * ``verbose``: if non-zero, it will print error messages to stderr + + Function returns 0 on success or -1 on failure, can be called multiple times + until the library is found. + +.. 
function:: int pypy_execute_source(char* source); + + Execute the Python source code given in the ``source`` argument. In case of + exceptions, it will print the Python traceback to stderr and return 1, + otherwise return 0. You should really do your own error handling in the + source. It'll acquire the GIL. + +.. function:: int pypy_execute_source_ptr(char* source, void* ptr); + + Just like the above, except it registers a magic argument in the source + scope as ``c_argument``, where ``void*`` is encoded as Python int. + +.. function:: void pypy_thread_attach(void); + + In case your application uses threads that are initialized outside of PyPy, + you need to call this function to tell the PyPy GC to track this thread. + Note that this function is not thread-safe itself, so you need to guard it + with a mutex. + +Simple example +-------------- + +Note that this API is a lot more minimal than say CPython C API, so at first +it's obvious to think that you can't do much. However, the trick is to do +all the logic in Python and expose it via `cffi`_ callbacks. Let's assume +we're on linux and pypy is installed in ``/opt/pypy`` with the +library in ``/opt/pypy/bin/libpypy-c.so``. (It doesn't need to be +installed; you can also replace this path with your local checkout.) +We write a little C program: + +.. code-block:: c + + #include "include/PyPy.h" + #include + + const char source[] = "print 'hello from pypy'"; + + int main() + { + int res; + + rpython_startup_code(); + // pypy_setup_home() is not needed in this trivial example + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + +If we save it as ``x.c`` now, compile it and run it with:: + + fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. + fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x + hello from pypy + +Worked! + +More advanced example +--------------------- + +Typically we need something more to do than simply execute source. The following +is a fully fledged example, please consult cffi documentation for details. +It's a bit longish, but it captures a gist what can be done with the PyPy +embedding interface: + +.. code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + c_func = ffi.cast('int(*)(int(*)(int))', c_argument)\n\ + c_func(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source_ptr(source, (void*)callback); + if (res) { + printf("Error calling pypy_execute_source_ptr!\n"); + } + return res; + } + +you can compile and run it with:: + + fijal at hermann:/opt/pypy$ gcc -g -o x x.c -lpypy-c -L. + fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +As you can see, we successfully managed to call Python from C and C from +Python. Now having one callback might not be enough, so what typically happens +is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` +and fill the structure from Python side for the future use. 
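The Python half of that struct-of-callbacks pattern can be sketched with
cffi alone.  Everything named below (``struct pypy_callbacks``, ``on_event``,
``scale``, ``fill_callbacks``) is invented for illustration, and the struct is
allocated from Python purely so the sketch runs on its own; in a real
embedding the C host allocates the struct and hands its address to the
embedded source as the ``c_argument`` integer described above.

.. code-block:: python

    # Self-contained sketch: the struct layout and all names are made up.
    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        struct pypy_callbacks {
            int    (*on_event)(int);
            double (*scale)(double);
        };
    """)

    def fill_callbacks(struct_address):
        """Install Python callbacks into the C struct at 'struct_address'."""
        cbs = ffi.cast("struct pypy_callbacks *", struct_address)

        @ffi.callback("int(int)")
        def on_event(code):
            print 'Got event %d from C' % code
            return code * 2

        @ffi.callback("double(double)")
        def scale(x):
            return x * 1.5

        cbs.on_event = on_event
        cbs.scale = scale
        # keep the callback objects alive as long as C may still call them
        fill_callbacks.keepalive = (on_event, scale)

    # Stand-in for the C host: allocate the struct here so the sketch runs.
    host_struct = ffi.new("struct pypy_callbacks *")
    fill_callbacks(int(ffi.cast("intptr_t", host_struct)))
    print host_struct.on_event(3)    # prints 'Got event 3 from C', then 6
    print host_struct.scale(2.0)     # 3.0

The keep-alive line matters: an ``ffi.callback`` object is garbage-collected
like any other Python object, and C must never call one that has been
collected.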
+ +Threading +--------- + +In case you want to use pthreads, what you need to do is to call +``pypy_thread_attach`` from each of the threads that you created (but not +from the main thread) and call ``pypy_init_threads`` from the main thread. + +.. _`cffi`: http://cffi.readthedocs.org/ +.. _`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ +.. _`PyPy uwsgi plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html +.. _`how to compile PyPy`: getting-started.html diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -267,7 +267,7 @@ .. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html .. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html .. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -243,7 +243,7 @@ discussions. .. _`contact us`: index.html -.. _`mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev ------------------------------------------------------------- OSError: ... cannot restore segment prot after reloc... Help? diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -315,27 +315,27 @@ .. _`trace example`: -Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +Tracing bytecodes ++++++++++++++++++ -You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +You can use a simple tracing mode to monitor the interpretation of +bytecodes. To enable it, set ``__pytrace__ = 1`` on the interactive +PyPy console:: >>>> __pytrace__ = 1 Tracing enabled - >>>> a = 1 + 2 - |- <<<< enter a = 1 + 2 @ 1 >>>> - |- 0 LOAD_CONST 0 (W_IntObject(1)) - |- 3 LOAD_CONST 1 (W_IntObject(2)) - |- 6 BINARY_ADD - |- add(W_IntObject(1), W_IntObject(2)) -> W_IntObject(3) - |- 7 STORE_NAME 0 (a) - |- hash(W_StringObject('a')) -> W_IntObject(-468864544) - |- int_w(W_IntObject(-468864544)) -> -468864544 - |-10 LOAD_CONST 2 () - |-13 RETURN_VALUE - |- <<<< leave a = 1 + 2 @ 1 >>>> + >>>> x = 5 + : LOAD_CONST 0 (5) + : STORE_NAME 0 (x) + : LOAD_CONST 1 (None) + : RETURN_VALUE 0 + >>>> x + : LOAD_NAME 0 (x) + : PRINT_EXPR 0 + 5 + : LOAD_CONST 0 (None) + : RETURN_VALUE 0 + >>>> Demos ------- @@ -386,7 +386,7 @@ .. _`full Python interpreter`: getting-started-python.html .. _`the blog`: http://morepypy.blogspot.com -.. _`pypy-dev mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html .. 
_`py library`: http://pylib.org diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -145,11 +145,13 @@ After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ + - `Embedding PyPy`_ - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html +.. _`Embedding PyPy`: embedding.html .. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html .. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html .. _`Look at our benchmark results`: http://speed.pypy.org diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -99,7 +99,7 @@ .. _`py-lib`: http://pylib.org/ .. _`py.test`: http://pytest.org/ .. _codespeak: http://codespeak.net/ -.. _`pypy-dev`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev Reports of 2006 diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -104,8 +104,8 @@ .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org .. _here: http://tismerysoft.de/pypy/irc-logs/pypy -.. _`Mercurial commit mailing list`: http://python.org/mailman/listinfo/pypy-commit -.. _`development mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit +.. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -11,7 +11,7 @@ `mailing list`_. This is simply for the reason that small possible projects tend to change very rapidly. -This list is mostly for having on overview on potential projects. This list is +This list is mostly for having an overview on potential projects. This list is by definition not exhaustive and we're pleased if people come up with their own improvement ideas. In any case, if you feel like working on some of those projects, or anything else in PyPy, pop up on IRC or write to us on the @@ -71,7 +71,7 @@ different ways to represent a unicode string, depending on whether the string fits into ASCII, has only two-byte characters or needs four-byte characters. -The actual details would be rather differen in PyPy, but we would like to have +The actual details would be rather different in PyPy, but we would like to have the same optimization implemented. Or maybe not. We can also play around with the idea of using a single @@ -142,7 +142,7 @@ * `hg` -Embedding PyPy +Embedding PyPy and improving CFFI ---------------------------------------- Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ @@ -154,6 +154,8 @@ a dynamic-link library with whatever C API we want. This gives us a one-size-fits-all generic way to make .so/.dll files from Python. 
+This would fit well in a "redesign CFFI" work. + .. _`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html Optimising cpyext (CPython C-API compatibility layer) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -85,3 +85,17 @@ .. branch: remove-intlong-smm kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module + +.. branch: int_w-refactor +In a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed. + +.. branch: test-58c3d8552833 +Fix for getarrayitem_gc_pure optimization + +.. branch: simple-range-strategy +Implements SimpleRangeListStrategy for case range(n) where n is a positive number. +Makes some traces nicer by getting rid of multiplication for calculating loop counter +and propagates that n > 0 further to get rid of guards. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -84,8 +84,9 @@ # register the minimal equivalent of running a small piece of code. This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint + from rpython.rlib.entrypoint import entrypoint, RPython_StartupCode from rpython.rtyper.lltypesystem import rffi, lltype + from rpython.rtyper.lltypesystem.lloperation import llop w_pathsetter = space.appexec([], """(): def f(path): @@ -120,20 +121,38 @@ debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 + return -1 @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): + after = rffi.aroundstate.after + if after: after() source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) + before = rffi.aroundstate.before + if before: before() return rffi.cast(rffi.INT, res) + @entrypoint('main', [rffi.CCHARP, lltype.Signed], + c_name='pypy_execute_source_ptr') + def pypy_execute_source_ptr(ll_source, ll_ptr): + after = rffi.aroundstate.after + if after: after() + source = rffi.charp2str(ll_source) + space.setitem(w_globals, space.wrap('c_argument'), + space.wrap(ll_ptr)) + res = _pypy_execute_source(source) + before = rffi.aroundstate.before + if before: before() + return rffi.cast(rffi.INT, res) + @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): if not space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() @entrypoint('main', [], c_name='pypy_thread_attach') def pypy_thread_attach(): @@ -144,7 +163,8 @@ rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), @@ -159,10 +179,11 @@ debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 + return -1 return 0 return entry_point, {'pypy_execute_source': 
pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, 'pypy_init_threads': pypy_init_threads, 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -92,7 +92,7 @@ i = 2 * HUGEVAL_BYTES addrstring = [' '] * i while True: - n = space.int_w(space.and_(w_id, w_0x0F)) + n = space.int_w(space.and_(w_id, w_0x0F), allow_conversion=False) n += ord('0') if n > ord('9'): n += (ord('a') - ord('9') - 1) @@ -203,16 +203,38 @@ def identifier_w(self, space): self._typed_unwrap_error(space, "string") - def int_w(self, space): + def int_w(self, space, allow_conversion=True): + # note that W_IntObject.int_w has a fast path and W_FloatObject.int_w + # raises w_TypeError + w_obj = self + if allow_conversion: + w_obj = space.int(self) + return w_obj._int_w(space) + + def _int_w(self, space): self._typed_unwrap_error(space, "integer") - def float_w(self, space): + def float_w(self, space, allow_conversion=True): + w_obj = self + if allow_conversion: + w_obj = space.float(self) + return w_obj._float_w(space) + + def _float_w(self, space): self._typed_unwrap_error(space, "float") def uint_w(self, space): self._typed_unwrap_error(space, "integer") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): + # note that W_IntObject and W_LongObject have fast paths, + # W_FloatObject.rbigint_w raises w_TypeError raises + w_obj = self + if allow_conversion: + w_obj = space.int(self) + return w_obj._bigint_w(space) + + def _bigint_w(self, space): self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): @@ -222,8 +244,7 @@ def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise oefmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + self._typed_unwrap_error(space, "integer") w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_int): @@ -1189,7 +1210,7 @@ assert isinstance(w_index_or_slice, W_SliceObject) start, stop, step = w_index_or_slice.indices3(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): @@ -1210,7 +1231,7 @@ start, stop, step, length = w_index_or_slice.indices4(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): @@ -1234,7 +1255,10 @@ raise oefmt(self.w_TypeError, "%s must be an integer, not %T", objdescr, w_obj) try: - index = self.int_w(w_index) + # allow_conversion=False it's not really necessary because the + # return type of __index__ is already checked by space.index(), + # but there is no reason to allow conversions anyway + index = self.int_w(w_index, allow_conversion=False) except OperationError, err: if not err.match(self, self.w_OverflowError): raise @@ -1251,28 +1275,16 @@ else: return index - def getslice(space, w_obj, w_start, w_stop): - w_slice = space.newslice(w_start, w_stop, space.w_None) - return space.getitem(w_obj, w_slice) - - def setslice(space, w_obj, w_start, w_stop, w_sequence): - w_slice = space.newslice(w_start, w_stop, space.w_None) - return space.setitem(w_obj, w_slice, w_sequence) - - def delslice(space, w_obj, w_start, w_stop): - w_slice = 
space.newslice(w_start, w_stop, space.w_None) - return space.delitem(w_obj, w_slice) - - def r_longlong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def r_longlong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.tolonglong() except OverflowError: raise OperationError(self.w_OverflowError, self.wrap('integer too large')) - def r_ulonglong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def r_ulonglong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.toulonglong() except OverflowError: @@ -1380,8 +1392,19 @@ 'argument must be a string without NUL characters')) return rstring.assert_str0(result) - def int_w(self, w_obj): - return w_obj.int_w(self) + def int_w(self, w_obj, allow_conversion=True): + """ + Unwrap an app-level int object into an interpret-level int. + + If allow_conversion==True, w_obj might be of any type which implements + __int__, *except* floats which are explicitly rejected. This is the + same logic as CPython's PyArg_ParseTuple. If you want to also allow + floats, you can call space.int_w(space.int(w_obj)). + + If allow_conversion=False, w_obj needs to be an app-level int or a + subclass. + """ + return w_obj.int_w(self, allow_conversion) def int(self, w_obj): return w_obj.int(self) @@ -1389,11 +1412,19 @@ def uint_w(self, w_obj): return w_obj.uint_w(self) - def bigint_w(self, w_obj): - return w_obj.bigint_w(self) + def bigint_w(self, w_obj, allow_conversion=True): + """ + Like int_w, but return a rlib.rbigint object and call __long__ if + allow_conversion is True. + """ + return w_obj.bigint_w(self, allow_conversion) - def float_w(self, w_obj): - return w_obj.float_w(self) + def float_w(self, w_obj, allow_conversion=True): + """ + Like int_w, but return an interp-level float and call __float__ if + allow_conversion is True. + """ + return w_obj.float_w(self, allow_conversion) def realstr_w(self, w_obj): # Like str_w, but only works if w_obj is really of type 'str'. @@ -1457,20 +1488,10 @@ return w_obj.ord(self) # This is all interface for gateway.py. - def gateway_int_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.int_w(self.int(w_obj)) - - def gateway_float_w(self, w_obj): - return self.float_w(self.float(w_obj)) - - def gateway_r_longlong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_longlong_w(self.int(w_obj)) + gateway_int_w = int_w + gateway_float_w = float_w + gateway_r_longlong_w = r_longlong_w + gateway_r_ulonglong_w = r_ulonglong_w def gateway_r_uint_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): @@ -1478,12 +1499,6 @@ self.wrap("integer argument expected, got float")) return self.uint_w(self.int(w_obj)) - def gateway_r_ulonglong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_ulonglong_w(self.int(w_obj)) - def gateway_nonnegint_w(self, w_obj): # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative. Here for gateway.py. @@ -1505,7 +1520,7 @@ def c_uint_w(self, w_obj): # Like space.gateway_uint_w(), but raises an app-level OverflowError if # the integer does not fit in 32 bits. Here for gateway.py. 
- value = self.gateway_r_uint_w(w_obj) + value = self.uint_w(w_obj) if value > UINT_MAX_32_BITS: raise OperationError(self.w_OverflowError, self.wrap("expected an unsigned 32-bit integer")) @@ -1515,7 +1530,7 @@ # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative or does not fit in 32 bits. Here # for gateway.py. - value = self.gateway_int_w(w_obj) + value = self.int_w(w_obj) if value < 0: raise OperationError(self.w_ValueError, self.wrap("expected a non-negative integer")) @@ -1524,22 +1539,22 @@ self.wrap("expected a 32-bit integer")) return value - def truncatedint_w(self, w_obj): + def truncatedint_w(self, w_obj, allow_conversion=True): # Like space.gateway_int_w(), but return the integer truncated # instead of raising OverflowError. For obscure cases only. try: - return self.int_w(w_obj) + return self.int_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise from rpython.rlib.rarithmetic import intmask return intmask(self.bigint_w(w_obj).uintmask()) - def truncatedlonglong_w(self, w_obj): + def truncatedlonglong_w(self, w_obj, allow_conversion=True): # Like space.gateway_r_longlong_w(), but return the integer truncated # instead of raising OverflowError. try: - return self.r_longlong_w(w_obj) + return self.r_longlong_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise diff --git a/pypy/interpreter/interactive.py b/pypy/interpreter/interactive.py --- a/pypy/interpreter/interactive.py +++ b/pypy/interpreter/interactive.py @@ -189,8 +189,7 @@ try: code.exec_code(self.space, self.w_globals, self.w_globals) finally: - if self.tracelevel: - self.space.unsettrace() + self.unsettrace() self.checktrace() # run doit() in an exception-catching box @@ -203,7 +202,38 @@ def settrace(self): if self.tracelevel: - self.space.settrace() + ec = self.space.getexecutioncontext() + if not hasattr(self, '_orig_bytecode_only_trace'): + self._orig_bytecode_only_trace = ec.bytecode_only_trace + ec.bytecode_only_trace = self._do_bytecode_only_trace + + def unsettrace(self): + if self.tracelevel: + ec = self.space.getexecutioncontext() + ec.bytecode_only_trace = self._orig_bytecode_only_trace + + def _do_bytecode_only_trace(self, frame): + from pypy.tool.pydis import Bytecode, HAVE_ARGUMENT + + if frame.hide(): + return + + self.unsettrace() + next_instr = frame.last_instr + opcode = ord(frame.pycode.co_code[next_instr]) + + oparg = 0 + if opcode >= HAVE_ARGUMENT: + lo = ord(frame.pycode.co_code[next_instr+1]) + hi = ord(frame.pycode.co_code[next_instr+2]) + oparg = (hi * 256) | lo + + class fake: + code = frame.pycode + bytecode = Bytecode(fake, next_instr, oparg, 0) + print '\t%-19s %s' % (str(frame.pycode.co_name) + ':', + bytecode.repr_with_space(self.space)) + self.settrace() def checktrace(self): s = self.space @@ -213,11 +243,11 @@ s.wrap("__pytrace__"))) if self.tracelevel > 0 and tracelevel == 0: - s.reset_trace() + self.unsettrace() print "Tracing disabled" if self.tracelevel == 0 and tracelevel > 0: - self.space.unsettrace() + self.unsettrace() print "Tracing enabled" self.tracelevel = tracelevel diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -120,7 +120,7 @@ exitcode = 0 else: try: - exitcode = space.int_w(w_exitcode) + exitcode = space.int_w(w_exitcode, allow_conversion=False) except OperationError: # not an integer: print it to stderr msg = space.str_w(space.str(w_exitcode)) 
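The allow_conversion rule spelled out in the int_w() docstring above boils
down to a small decision table, which the following plain-Python model
reproduces.  It is only an illustration of the documented rule (floats are
always rejected, __int__ is honoured only when conversion is allowed), not
PyPy's interp-level code, and the error messages are approximate.

    # Plain-Python model of the int_w(allow_conversion=...) rule; an
    # illustration of the documented behaviour, not PyPy's actual code.
    def int_w(obj, allow_conversion=True):
        if isinstance(obj, int):
            return obj                    # real ints always pass
        if isinstance(obj, float):
            # floats are explicitly rejected, conversion allowed or not
            raise TypeError("integer argument expected, got float")
        if allow_conversion and hasattr(type(obj), '__int__'):
            return int(obj)               # same logic as PyArg_ParseTuple
        raise TypeError("expected integer, got %s object"
                        % type(obj).__name__)

    class MyInt(object):
        def __int__(self):
            return 43

    assert int_w(42) == 42
    assert int_w(MyInt()) == 43           # converted through __int__
    try:
        int_w(MyInt(), allow_conversion=False)
    except TypeError, e:
        print e                           # conversion refused
    try:
        int_w(44.0)
    except TypeError, e:
        print e                           # floats rejected either way

This mirrors test_int_w() above: MyInt() converts to 43 only on the default
path, while 44.0 raises TypeError whether or not conversions are allowed.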
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -113,7 +113,7 @@ def len(self, x): return len(x) - def int_w(self, x): + def int_w(self, x, allow_conversion=True): return x def eq_w(self, x, y): diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -469,6 +469,8 @@ space.wrapbytes('\x80')) def test_interp2app_unwrap_spec_typechecks(self): + from rpython.rlib.rarithmetic import r_longlong + space = self.space w = space.wrap def g3_id(space, x): @@ -503,6 +505,12 @@ raises(gateway.OperationError,space.call_function,w_app_g3_f,w(None)) raises(gateway.OperationError,space.call_function,w_app_g3_f,w("foo")) + app_g3_r = gateway.interp2app_temp(g3_id, + unwrap_spec=[gateway.ObjSpace, + r_longlong]) + w_app_g3_r = space.wrap(app_g3_r) + raises(gateway.OperationError,space.call_function,w_app_g3_r,w(1.0)) + def test_interp2app_unwrap_spec_unicode(self): space = self.space w = space.wrap diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -161,6 +161,40 @@ self.space.setattr(w_instance, self.space.wrap("__call__"), w_func) assert not is_callable(w_instance) + + def test_int_w(self): + space = self.space + w_x = space.wrap(42) + assert space.int_w(w_x) == 42 + assert space.int_w(w_x, allow_conversion=False) == 42 + # + w_x = space.wrap(44.0) + space.raises_w(space.w_TypeError, space.int_w, w_x) + space.raises_w(space.w_TypeError, space.int_w, w_x, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + return MyInt() + """) + assert space.int_w(w_instance) == 43 + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + + class AnotherInt(object): + def __int__(self): + return MyInt() + + return AnotherInt() + """) + space.raises_w(space.w_TypeError, space.int_w, w_instance) + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + def test_interp_w(self): w = self.space.wrap w_bltinfunction = self.space.builtin.get('len') diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -7,16 +7,19 @@ pypypath = py.path.local(pypy.__file__).dirpath("bin", "pyinteractive.py") -def run(*args): +def run(*args, **kwds): + stdin = kwds.pop('stdin', '') + assert not kwds argslist = map(str, args) - popen = subprocess.Popen(argslist, stdout=subprocess.PIPE) - stdout, stderr = popen.communicate() - print '--- stdout ---' - print stdout - print - print '--- stderr ---' - print stderr - print + popen = subprocess.Popen(argslist, stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + stdout, stderr = popen.communicate(stdin) + print('--- stdout ---') + print(stdout) + print() + print('--- stderr ---') + print(stderr) + print() return stdout @@ -105,3 +108,19 @@ stderr=subprocess.PIPE) _, stderr = popen.communicate() assert 'KeyError: \n' in stderr + + +def test_pytrace(): + output = run(sys.executable, pypypath, '-S', + stdin="__pytrace__ = 1\nx = 5\nx") + assert ('\t: LOAD_CONST 0 (5)\n' + 
'\t: STORE_NAME 0 (x)\n' + '\t: LOAD_CONST 1 (None)\n' + '\t: RETURN_VALUE 0 \n' + '>>>> ') in output + assert ('\t: LOAD_NAME 0 (x)\n' + '\t: PRINT_EXPR 0 \n' + # '5\n' --- this line sent to stderr + '\t: LOAD_CONST 0 (None)\n' + '\t: RETURN_VALUE 0 \n' + '>>>> ') in output diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -59,6 +59,18 @@ def test_hash(self): raises(TypeError, "hash(memoryview(b'hello'))") + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + buf = buffer('hello world') + raises(TypeError, "buf[MyInt(0)]") + raises(TypeError, "buf[MyInt(0):MyInt(5)]") + def test_rw(self): data = bytearray(b'abcefg') v = memoryview(data) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -87,6 +87,8 @@ l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) + assert list_strategy(l) == "simple_range" + l = range(1, 2) assert list_strategy(l) == "range" l = [1, "b", 3] assert list_strategy(l) == "object" diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -14,6 +14,8 @@ _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. + # XXX this could be improved with an elidable method get_size() + # that raises in case it's still -1... 
cast_anything = False is_primitive_integer = False diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -137,13 +137,13 @@ else: return value try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.tolonglong() except OverflowError: @@ -154,13 +154,13 @@ if space.is_w(space.type(w_ob), space.w_int): # shortcut return space.int_w(w_ob) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.toint() except OverflowError: @@ -182,13 +182,13 @@ raise OperationError(space.w_OverflowError, space.wrap(neg_msg)) return r_ulonglong(value) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.toulonglong() @@ -202,13 +202,13 @@ def as_unsigned_long(space, w_ob, strict): # same as as_unsigned_long_long(), but returning just an Unsigned try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.touint() diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -114,20 +114,43 @@ # ____________________________________________________________ -SF_MSVC_BITFIELDS = 1 -SF_GCC_ARM_BITFIELDS = 2 -SF_GCC_BIG_ENDIAN = 4 -SF_PACKED = 8 + +SF_MSVC_BITFIELDS = 0x01 +SF_GCC_ARM_BITFIELDS = 0x02 +SF_GCC_X86_BITFIELDS = 0x10 + +SF_GCC_BIG_ENDIAN = 0x04 +SF_GCC_LITTLE_ENDIAN = 0x40 + +SF_PACKED = 0x08 + if sys.platform == 'win32': - DEFAULT_SFLAGS = SF_MSVC_BITFIELDS + DEFAULT_SFLAGS_PLATFORM = SF_MSVC_BITFIELDS else: if rffi_platform.getdefined('__arm__', ''): - DEFAULT_SFLAGS = SF_GCC_ARM_BITFIELDS + DEFAULT_SFLAGS_PLATFORM = SF_GCC_ARM_BITFIELDS else: - DEFAULT_SFLAGS = 0 - if sys.byteorder == 'big': - DEFAULT_SFLAGS |= SF_GCC_BIG_ENDIAN + DEFAULT_SFLAGS_PLATFORM = SF_GCC_X86_BITFIELDS + +if sys.byteorder == 'big': + DEFAULT_SFLAGS_ENDIAN = SF_GCC_BIG_ENDIAN +else: + DEFAULT_SFLAGS_ENDIAN = SF_GCC_LITTLE_ENDIAN + + +def complete_sflags(sflags): + # add one of the SF_xxx_BITFIELDS flags if none is specified + if not (sflags & (SF_MSVC_BITFIELDS | SF_GCC_ARM_BITFIELDS | + SF_GCC_X86_BITFIELDS)): + sflags |= DEFAULT_SFLAGS_PLATFORM + # add one of SF_GCC_xx_ENDIAN if none is specified + if not (sflags & (SF_GCC_BIG_ENDIAN | SF_GCC_LITTLE_ENDIAN)): + sflags |= DEFAULT_SFLAGS_ENDIAN + return sflags + +# ____________________________________________________________ + 
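Since complete_sflags() only fills in whichever of the two flag groups is
missing, the resulting values can be checked by hand.  The sketch below redoes
that arithmetic with the platform and endian defaults passed in explicitly
(the real module computes them once at import time from the build platform
and byte order); the expected 0x50 / 0x58 / 0x41 values assume a little-endian
x86 build.

    # Worked example of the complete_sflags() defaulting shown above,
    # with the build-time defaults passed in as plain arguments.
    SF_MSVC_BITFIELDS    = 0x01
    SF_GCC_ARM_BITFIELDS = 0x02
    SF_GCC_X86_BITFIELDS = 0x10
    SF_GCC_BIG_ENDIAN    = 0x04
    SF_GCC_LITTLE_ENDIAN = 0x40
    SF_PACKED            = 0x08

    def complete_sflags(sflags,
                        platform_default=SF_GCC_X86_BITFIELDS,
                        endian_default=SF_GCC_LITTLE_ENDIAN):
        if not (sflags & (SF_MSVC_BITFIELDS | SF_GCC_ARM_BITFIELDS |
                          SF_GCC_X86_BITFIELDS)):
            sflags |= platform_default       # no bitfield layout requested
        if not (sflags & (SF_GCC_BIG_ENDIAN | SF_GCC_LITTLE_ENDIAN)):
            sflags |= endian_default         # no endianness requested
        return sflags

    assert complete_sflags(0)                 == 0x50  # x86 + little-endian
    assert complete_sflags(SF_PACKED)         == 0x58  # packing is orthogonal
    assert complete_sflags(SF_MSVC_BITFIELDS) == 0x41  # explicit layout kept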
@unwrap_spec(name=str) def new_struct_type(space, name): @@ -140,8 +163,8 @@ @unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int, sflags=int) def complete_struct_or_union(space, w_ctype, w_fields, w_ignored=None, - totalsize=-1, totalalignment=-1, - sflags=DEFAULT_SFLAGS): + totalsize=-1, totalalignment=-1, sflags=0): + sflags = complete_sflags(sflags) if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) or w_ctype.size >= 0): raise OperationError(space.w_TypeError, diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -359,6 +359,9 @@ assert x.load_function(BVoidP, 'strcpy') py.test.raises(KeyError, x.load_function, BVoidP, 'xxx_this_function_does_not_exist') + # the next one is from 'libm', not 'libc', but we assume + # that it is already loaded too, so it should work + assert x.load_function(BVoidP, 'sqrt') def test_hash_differences(): BChar = new_primitive_type("char") @@ -1418,8 +1421,10 @@ p = newp(BStructPtr, [12]) assert p.a1 == 12 e = py.test.raises(TypeError, newp, BStructPtr, [None]) - assert ("an integer is required" in str(e.value) or - "unsupported operand type for int(): 'NoneType'" in str(e.value)) #PyPy + msg = str(e.value) + assert ("an integer is required" in msg or # CPython + "unsupported operand type for int(): 'NoneType'" in msg or # old PyPys + "expected integer, got NoneType object" in msg) # newer PyPys py.test.raises(TypeError, 'p.a1 = "def"') if sys.version_info < (3,): BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt) @@ -2881,7 +2886,7 @@ ('b1', BInt, 9), ('b2', BUInt, 7), ('c', BChar, -1)], -1, -1, -1, flag) - if flag % 2 == 0: # gcc, any variant + if not (flag & SF_MSVC_BITFIELDS): # gcc, any variant assert typeoffsetof(BStruct, 'c') == (BChar, 3) assert sizeof(BStruct) == 4 else: # msvc @@ -2896,20 +2901,20 @@ p.c = b'\x9D' raw = buffer(p)[:] if sys.byteorder == 'little': - if flag == 0 or flag == 2: # gcc, little endian + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A7\xC7\x9D' - elif flag == 1: # msvc - assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\xE3\x9B\x9D' else: raise AssertionError("bad flag") else: - if flag == 0 or flag == 2: # gcc + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A\xC77\x9D' - elif flag == 1: # msvc - assert raw == b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\x9B\xE3\x9D' else: raise AssertionError("bad flag") @@ -2919,18 +2924,15 @@ ('', BShort, 9), ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) - if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 + elif flag & SF_GCC_X86_BITFIELDS: assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc + elif flag & SF_GCC_ARM_BITFIELDS: assert sizeof(BStruct) == 6 assert alignof(BStruct) == 2 - elif flag == 2: # gcc ARM - assert sizeof(BStruct) == 6 - assert alignof(BStruct) == 2 - elif flag == 4: # gcc, big endian - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 
1 else: raise AssertionError("bad flag") # @@ -2939,37 +2941,43 @@ ('', BInt, 0), ('', BInt, 0), ('c', BChar, -1)], -1, -1, -1, flag) - if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 + assert alignof(BStruct) == 1 + elif flag & SF_GCC_X86_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc - assert typeoffsetof(BStruct, 'c') == (BChar, 1) - assert sizeof(BStruct) == 2 - assert alignof(BStruct) == 1 - elif flag == 2: # gcc ARM + elif flag & SF_GCC_ARM_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 8 assert alignof(BStruct) == 4 - elif flag == 4: # gcc, big endian - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 else: raise AssertionError("bad flag") -def test_bitfield_as_gcc(): - _test_bitfield_details(flag=0) +SF_MSVC_BITFIELDS = 0x01 +SF_GCC_ARM_BITFIELDS = 0x02 +SF_GCC_X86_BITFIELDS = 0x10 + +SF_GCC_BIG_ENDIAN = 0x04 +SF_GCC_LITTLE_ENDIAN = 0x40 + +SF_PACKED = 0x08 + +def test_bitfield_as_x86_gcc(): + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_msvc(): - _test_bitfield_details(flag=1) + _test_bitfield_details(flag=SF_MSVC_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_arm_gcc(): - _test_bitfield_details(flag=2) + _test_bitfield_details(flag=SF_GCC_ARM_BITFIELDS|SF_GCC_LITTLE_ENDIAN) -def test_bitfield_as_big_endian(): - _test_bitfield_details(flag=4) +def test_bitfield_as_ppc_gcc(): + # PowerPC uses the same format as X86, but is big-endian + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_BIG_ENDIAN) def test_struct_array_no_length(): @@ -3145,7 +3153,7 @@ complete_struct_or_union(BStruct, [('a1', BLong, -1), ('a2', BChar, -1), ('a3', BShort, -1)], - None, -1, -1, 8) # SF_PACKED==8 + None, -1, -1, SF_PACKED) d = BStruct.fields assert len(d) == 3 assert d[0][0] == 'a1' @@ -3174,7 +3182,7 @@ complete_struct_or_union, BStruct, [('a1', BLong, 30), ('a2', BChar, 5)], - None, -1, -1, 8) # SF_PACKED==8 + None, -1, -1, SF_PACKED) def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -159,7 +159,7 @@ subentry = ProfilerSubEntry(entry.frame) self.calls[entry] = subentry return subentry - return None + raise From noreply at buildbot.pypy.org Thu Mar 6 02:32:43 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 02:32:43 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140306013243.B0A9E1C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69747:10973dec5068 Date: 2014-03-05 17:12 -0800 http://bitbucket.org/pypy/pypy/changeset/10973dec5068/ Log: merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -99,3 +99,6 @@ Implements SimpleRangeListStrategy for case range(n) where n is a positive number. Makes some traces nicer by getting rid of multiplication for calculating loop counter and propagates that n > 0 further to get rid of guards. + +.. 
branch: popen-pclose +Provide an exit status for popen'ed RFiles via pclose diff --git a/pypy/interpreter/special.py b/pypy/interpreter/special.py --- a/pypy/interpreter/special.py +++ b/pypy/interpreter/special.py @@ -2,16 +2,10 @@ class Ellipsis(W_Root): - def __init__(self, space): - self.space = space - - def descr__repr__(self): - return self.space.wrap('Ellipsis') + def descr__repr__(self, space): + return space.wrap('Ellipsis') class NotImplemented(W_Root): - def __init__(self, space): - self.space = space - - def descr__repr__(self): - return self.space.wrap('NotImplemented') + def descr__repr__(self, space): + return space.wrap('NotImplemented') diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -71,8 +71,8 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild - self.w_Ellipsis = special.Ellipsis(self) - self.w_NotImplemented = special.NotImplemented(self) + self.w_Ellipsis = special.Ellipsis() + self.w_NotImplemented = special.NotImplemented() def _freeze_(self): return True diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2319,6 +2319,16 @@ a[...] = 4 assert (a == [4, 4, 4]).all() + b = np.arange(24).reshape(2,3,4) + b[...] = 100 + assert (b == 100).all() + assert b.shape == (2, 3, 4) + b[...] = [10, 20, 30, 40] + assert (b[:,:,0] == 10).all() + assert (b[0,0,:] == [10, 20, 30, 40]).all() + assert b.shape == b[...].shape + assert (b == b[...]).all() + class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -59,8 +59,8 @@ self.w_None = W_NoneObject.w_None self.w_False = W_BoolObject.w_False self.w_True = W_BoolObject.w_True - self.w_NotImplemented = self.wrap(special.NotImplemented(self)) - self.w_Ellipsis = self.wrap(special.Ellipsis(self)) + self.w_NotImplemented = self.wrap(special.NotImplemented()) + self.w_Ellipsis = self.wrap(special.Ellipsis()) # types self.builtin_types = {} diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -543,10 +543,13 @@ assert a == 9007199254740991 a = operator.truediv(x, 7) assert a == 9007199254740991.0 - exec("from __future__ import division; " - "a = x / 7; b = operator.truediv(x, 7)") - assert a == 9007199254740991.0 - assert b == 9007199254740991.0 + + def test_truediv_future(self): + ns = dict(x=63050394783186940) + exec("from __future__ import division; import operator; " + "a = x / 7; b = operator.truediv(x, 7)", ns) + assert ns['a'] == 9007199254740991.0 + assert ns['b'] == 9007199254740991.0 class AppTestIntShortcut(AppTestInt): diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -433,7 +433,7 @@ intlist.find(w(4), 0, 2) -class AppTestW_ListObject(object): +class AppTestListObject(object): def setup_class(cls): import platform import sys @@ -526,6 +526,18 @@ l.__init__(assignment) assert l == list(assignment) + def test_range_init(self): + x = 
range(5,1) + assert x == [] + + x = range(1,10) + x[22:0:-1] == range(1,10) + + r = range(10, 10) + assert len(r) == 0 + assert list(reversed(r)) == [] + assert r[:] == [] + def test_extend_list(self): l = l0 = [1] l.extend([2]) @@ -598,24 +610,28 @@ def test_sort_key(self): def lower(x): return x.lower() l = ['a', 'C', 'b'] - l.sort(key = lower) + l.sort(key=lower) assert l == ['a', 'b', 'C'] l = [] - l.sort(key = lower) + l.sort(key=lower) assert l == [] - l = [ 'a' ] - l.sort(key = lower) - assert l == [ 'a' ] + l = ['a'] + l.sort(key=lower) + assert l == ['a'] + + r = range(10) + r.sort(key=lambda x: -x) + assert r == range(9, -1, -1) def test_sort_reversed(self): l = list(range(10)) - l.sort(reverse = True) + l.sort(reverse=True) assert l == list(range(9, -1, -1)) l = [] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [] l = [1] - l.sort(reverse = True) + l.sort(reverse=True) assert l == [1] raises(TypeError, sorted, [], None, lambda x, y: 0) @@ -630,6 +646,17 @@ l.sort() assert l == ["a", "b", "c", "d"] + def test_sort_range(self): + l = range(3, 10, 3) + l.sort() + assert l == [3, 6, 9] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort(reverse=True) + assert l == [9, 6, 3] + l.sort() + assert l == [3, 6, 9] + def test_getitem(self): l = [1, 2, 3, 4, 5, 6, 9] assert l[0] == 1 @@ -653,6 +680,23 @@ l = [] raises(IndexError, "l[1]") + def test_getitem_range(self): + l = range(5) + raises(IndexError, "l[-6]") + raises(IndexError, "l[5]") + assert l[0] == 0 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-5] == 0 + + l = range(1, 5) + raises(IndexError, "l[-5]") + raises(IndexError, "l[4]") + assert l[0] == 1 + assert l[-1] == 4 + assert l[-2] == 3 + assert l[-4] == 1 + def test_setitem(self): l = [] raises(IndexError, "l[1] = 2") @@ -665,6 +709,10 @@ l[0] = "2" assert l == ["2",3] + l = range(3) + l[0] = 1 + assert l == [1,1,2] + def test_delitem(self): l = [1, 2, 3, 4, 5, 6, 9] del l[0] @@ -730,6 +778,29 @@ assert l[1:0:None] == [] assert l[1:0] == [] + def test_getslice_invalid(self): + x = [1,2,3,4] + assert x[10:0] == [] + assert x[10:0:None] == [] + + x = range(1,5) + assert x[10:0] == [] + assert x[10:0:None] == [] + + assert x[0:22] == [1,2,3,4] + assert x[-1:10] == [4] + + assert x[0:22:None] == [1,2,3,4] + assert x[-1:10:None] == [4] + + def test_getslice_range_backwards(self): + x = range(1,10) + assert x[22:-10] == [] + assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] + assert x[10:3:-1] == [9,8,7,6,5] + assert x[10:3:-2] == [9,7,5] + assert x[1:5:-1] == [] + def test_delall(self): l = l0 = [1,2,3] del l[:] @@ -763,6 +834,13 @@ l1 += [0] assert l1 == ['a', 'b', 'c', 0] + r1 = r2 = range(5) + assert r1 is r2 + r1 += [15] + assert r1 is r2 + assert r1 == [0, 1, 2, 3, 4, 15] + assert r2 == [0, 1, 2, 3, 4, 15] + def test_iadd_iterable(self): l = l0 = [1,2,3] l += iter([4,5]) @@ -813,6 +891,17 @@ assert l is l0 assert l == [1.1, 2.2, 1.1, 2.2] + l = list(range(2)) + l *= 2 + assert l == [0, 1, 0, 1] + + r1 = r2 = list(range(3)) + assert r1 is r2 + r1 *= 2 + assert r1 is r2 + assert r1 == [0, 1, 2, 0, 1, 2] + assert r2 == [0, 1, 2, 0, 1, 2] + def test_mul_errors(self): try: [1, 2, 3] * (3,) @@ -894,6 +983,11 @@ assert l == [] assert l is l0 + l = [] + l2 = range(3) + l.__setslice__(0,3,l2) + assert l == [0,1,2] + def test_assign_extended_slice(self): l = l0 = ['a', 'b', 'c'] l[::-1] = ['a', 'b', 'c'] @@ -980,10 +1074,6 @@ l.append(x) assert l == list(range(5)) - l = list(range(4)) - l.append(4) - assert l == list(range(5)) - l = [1,2,3] l.append("a") assert l == 
[1,2,3,"a"] @@ -992,6 +1082,22 @@ l.append(4.4) assert l == [1.1, 2.2, 3.3, 4.4] + l = range(4) + l.append(4) + assert l == range(5) + + l = range(5) + l.append(26) + assert l == [0,1,2,3,4,26] + + l = range(5) + l.append("a") + assert l == [0,1,2,3,4,"a"] + + l = range(5) + l.append(5) + assert l == [0,1,2,3,4,5] + def test_count(self): c = list('hello') assert c.count('l') == 2 @@ -1019,6 +1125,10 @@ l.insert(0,"a") assert l == ["a", 1, 2, 3] + l = range(3) + l.insert(1,5) + assert l == [0,5,1,2] + def test_pop(self): c = list('hello world') s = '' @@ -1031,6 +1141,7 @@ l = list(range(10)) l.pop() assert l == list(range(9)) + assert l.pop(0) == 0 l = [1.1, 2.2, 3.3] l.pop() @@ -1101,6 +1212,16 @@ c.reverse() assert ''.join(c) == 'dlrow olleh' + l = range(3) + l.reverse() + assert l == [2,1,0] + + r = range(3) + r[0] = 1 + assert r == [1, 1, 2] + r.reverse() + assert r == [2, 1, 1] + def test_reversed(self): assert list(list('hello').__reversed__()) == ['o', 'l', 'l', 'e', 'h'] assert list(reversed(list('hello'))) == ['o', 'l', 'l', 'e', 'h'] @@ -1377,7 +1498,114 @@ assert item11 in l[::11] -class AppTestWithoutStrategies(object): +class AppTestListObjectWithRangeList(AppTestListObject): + """Run the list object tests with range lists enabled. Tests should go in + AppTestListObject so they can be run -A against CPython as well. + """ + spaceconfig = {"objspace.std.withrangelist": True} + + +class AppTestRangeListForcing: + """Tests for range lists that test forcing. Regular tests should go in + AppTestListObject so they can be run -A against CPython as well. Separate + from AppTestListObjectWithRangeList so we don't silently overwrite tests + with the same names. + """ + spaceconfig = {"objspace.std.withrangelist": True} + + def setup_class(cls): + if cls.runappdirect: + py.test.skip("__pypy__.internal_repr() cannot be used to see " + "if a range list was forced on top of pypy-c") + cls.w_not_forced = cls.space.appexec([], """(): + import __pypy__ + def f(r): + return (isinstance(r, list) and + "RangeListStrategy" in __pypy__.internal_repr(r)) + return f + """) + + def test_simple(self): + result = [] + r = range(1, 8, 2) + for i in r: + result.append(i) + assert result == [1, 3, 5, 7] + assert self.not_forced(r) + + def test_getitem_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[10:15]: + result.append(i) + assert result == [21, 23, 25, 27, 29] + assert not self.not_forced(r) + + def test_getitem_extended_slice(self): + result = [] + r = range(1, 100, 2) + for i in r[40:30:-2]: + result.append(i) + assert result == [81, 77, 73, 69, 65] + assert not self.not_forced(r) + + def test_repr(self): + r = range(5) + assert repr(r) == "[0, 1, 2, 3, 4]" + assert self.not_forced(r) + + def test_force(self): + r = range(10) + r[0] = 42 + assert not self.not_forced(r) + assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + def test_reverse(self): + r = range(10) + r.reverse() + assert not self.not_forced(r) + assert r == range(9, -1, -1) + + def test_pop(self): + # RangeListStrategy + r = range(1, 10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + assert repr(r) == repr(range(1, 9)) + res = r.pop(0) + assert res == 1 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 9)) + res = r.pop(len(r) - 1) + assert res == 8 + assert self.not_forced(r) + assert repr(r) == repr(range(2, 8)) + res = r.pop(2) + assert res == 4 + assert not self.not_forced(r) + assert r == [2, 3, 5, 6, 7] + res = r.pop(2) + assert res == 5 + assert not self.not_forced(r) + assert r == [2, 3, 6, 
7] + + # SimpleRangeListStrategy + r = range(10) + res = r.pop() + assert res == 9 + assert self.not_forced(r) + res = r.pop() + assert res == 8 + assert repr(r) == repr(range(8)) + assert self.not_forced(r) + res = r.pop(0) + assert res == 0 + assert not self.not_forced(r) + assert r == [1, 2, 3, 4, 5, 6, 7] + + +class AppTestWithoutStrategies: spaceconfig = {"objspace.std.withliststrategies": False} def test_no_shared_empty_list(self): diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -10,7 +10,6 @@ py.test.py3k_skip("XXX: strategies are currently broken") class TestW_ListStrategies(TestW_ListObject): - def test_check_strategy(self): space = self.space w = space.wrap @@ -237,7 +236,6 @@ l.setslice(0, 1, 2, make_range_list(space, 5, 1, 4)) assert isinstance(l.strategy, IntegerListStrategy) - def test_setslice_List(self): space = self.space @@ -710,7 +708,6 @@ w_l2.sort(False) assert space.eq_w(w_l, w_l2) - def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrapbytes("a"), space.wrapbytes("b")]) From noreply at buildbot.pypy.org Thu Mar 6 02:32:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 02:32:45 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3k and kill new withrangelist tests Message-ID: <20140306013245.059551C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69748:19a9d0a0df2f Date: 2014-03-05 17:31 -0800 http://bitbucket.org/pypy/pypy/changeset/19a9d0a0df2f/ Log: adapt to py3k and kill new withrangelist tests diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -527,13 +527,13 @@ assert l == list(assignment) def test_range_init(self): - x = range(5,1) + x = list(range(5,1)) assert x == [] - x = range(1,10) + x = list(range(1,10)) x[22:0:-1] == range(1,10) - r = range(10, 10) + r = list(range(10, 10)) assert len(r) == 0 assert list(reversed(r)) == [] assert r[:] == [] @@ -619,9 +619,9 @@ l.sort(key=lower) assert l == ['a'] - r = range(10) + r = list(range(10)) r.sort(key=lambda x: -x) - assert r == range(9, -1, -1) + assert r == list(range(9, -1, -1)) def test_sort_reversed(self): l = list(range(10)) @@ -647,7 +647,7 @@ assert l == ["a", "b", "c", "d"] def test_sort_range(self): - l = range(3, 10, 3) + l = list(range(3, 10, 3)) l.sort() assert l == [3, 6, 9] l.sort(reverse=True) @@ -709,7 +709,7 @@ l[0] = "2" assert l == ["2",3] - l = range(3) + l = list(range(3)) l[0] = 1 assert l == [1,1,2] @@ -783,7 +783,7 @@ assert x[10:0] == [] assert x[10:0:None] == [] - x = range(1,5) + x = list(range(1,5)) assert x[10:0] == [] assert x[10:0:None] == [] @@ -794,7 +794,7 @@ assert x[-1:10:None] == [4] def test_getslice_range_backwards(self): - x = range(1,10) + x = list(range(1,10)) assert x[22:-10] == [] assert x[22:-10:-1] == [9,8,7,6,5,4,3,2,1] assert x[10:3:-1] == [9,8,7,6,5] @@ -834,7 +834,7 @@ l1 += [0] assert l1 == ['a', 'b', 'c', 0] - r1 = r2 = range(5) + r1 = r2 = list(range(5)) assert r1 is r2 r1 += [15] assert r1 is r2 @@ -985,7 +985,7 @@ l = [] l2 = range(3) - l.__setslice__(0,3,l2) + l.__setitem__(slice(0,3),l2) assert l == [0,1,2] def test_assign_extended_slice(self): @@ -1082,19 +1082,19 @@ l.append(4.4) assert l == [1.1, 2.2, 3.3, 4.4] - l = range(4) + l = list(range(4)) 
l.append(4) - assert l == range(5) + assert l == list(range(5)) - l = range(5) + l = list(range(5)) l.append(26) assert l == [0,1,2,3,4,26] - l = range(5) + l = list(range(5)) l.append("a") assert l == [0,1,2,3,4,"a"] - l = range(5) + l = list(range(5)) l.append(5) assert l == [0,1,2,3,4,5] @@ -1125,7 +1125,7 @@ l.insert(0,"a") assert l == ["a", 1, 2, 3] - l = range(3) + l = list(range(3)) l.insert(1,5) assert l == [0,5,1,2] @@ -1212,11 +1212,11 @@ c.reverse() assert ''.join(c) == 'dlrow olleh' - l = range(3) + l = list(range(3)) l.reverse() assert l == [2,1,0] - r = range(3) + r = list(range(3)) r[0] = 1 assert r == [1, 1, 2] r.reverse() @@ -1498,113 +1498,6 @@ assert item11 in l[::11] -class AppTestListObjectWithRangeList(AppTestListObject): - """Run the list object tests with range lists enabled. Tests should go in - AppTestListObject so they can be run -A against CPython as well. - """ - spaceconfig = {"objspace.std.withrangelist": True} - - -class AppTestRangeListForcing: - """Tests for range lists that test forcing. Regular tests should go in - AppTestListObject so they can be run -A against CPython as well. Separate - from AppTestListObjectWithRangeList so we don't silently overwrite tests - with the same names. - """ - spaceconfig = {"objspace.std.withrangelist": True} - - def setup_class(cls): - if cls.runappdirect: - py.test.skip("__pypy__.internal_repr() cannot be used to see " - "if a range list was forced on top of pypy-c") - cls.w_not_forced = cls.space.appexec([], """(): - import __pypy__ - def f(r): - return (isinstance(r, list) and - "RangeListStrategy" in __pypy__.internal_repr(r)) - return f - """) - - def test_simple(self): - result = [] - r = range(1, 8, 2) - for i in r: - result.append(i) - assert result == [1, 3, 5, 7] - assert self.not_forced(r) - - def test_getitem_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[10:15]: - result.append(i) - assert result == [21, 23, 25, 27, 29] - assert not self.not_forced(r) - - def test_getitem_extended_slice(self): - result = [] - r = range(1, 100, 2) - for i in r[40:30:-2]: - result.append(i) - assert result == [81, 77, 73, 69, 65] - assert not self.not_forced(r) - - def test_repr(self): - r = range(5) - assert repr(r) == "[0, 1, 2, 3, 4]" - assert self.not_forced(r) - - def test_force(self): - r = range(10) - r[0] = 42 - assert not self.not_forced(r) - assert r == [42, 1, 2, 3, 4, 5, 6, 7, 8, 9] - - def test_reverse(self): - r = range(10) - r.reverse() - assert not self.not_forced(r) - assert r == range(9, -1, -1) - - def test_pop(self): - # RangeListStrategy - r = range(1, 10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - assert repr(r) == repr(range(1, 9)) - res = r.pop(0) - assert res == 1 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 9)) - res = r.pop(len(r) - 1) - assert res == 8 - assert self.not_forced(r) - assert repr(r) == repr(range(2, 8)) - res = r.pop(2) - assert res == 4 - assert not self.not_forced(r) - assert r == [2, 3, 5, 6, 7] - res = r.pop(2) - assert res == 5 - assert not self.not_forced(r) - assert r == [2, 3, 6, 7] - - # SimpleRangeListStrategy - r = range(10) - res = r.pop() - assert res == 9 - assert self.not_forced(r) - res = r.pop() - assert res == 8 - assert repr(r) == repr(range(8)) - assert self.not_forced(r) - res = r.pop(0) - assert res == 0 - assert not self.not_forced(r) - assert r == [1, 2, 3, 4, 5, 6, 7] - - class AppTestWithoutStrategies: spaceconfig = {"objspace.std.withliststrategies": False} From noreply at buildbot.pypy.org Thu Mar 6 
09:08:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 09:08:44 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: hack hack hack in-progress Message-ID: <20140306080844.CE52B1C35CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69749:499acfed5c41 Date: 2014-03-06 09:07 +0100 http://bitbucket.org/pypy/pypy/changeset/499acfed5c41/ Log: hack hack hack in-progress diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -10,6 +10,7 @@ from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import LONG_BIT, r_uint from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.translator.stm import stmgcintf WORD = LONG_BIT // 8 NULL = llmemory.NULL @@ -45,7 +46,7 @@ GCHDRP = lltype.Ptr(GCHDR) GCHDRSIZE = 3 * WORD - HDR = rffi.COpaque('struct stm_object_s') + HDR = stmgcintf.GCPTR.TO H_TID = 0 H_REVISION = WORD H_ORIGINAL = WORD * 2 diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -64,7 +64,12 @@ self.instrument_ncounter = 0 + def with_stm(self): + return self.translator.config.translation.stm + def gettypedefnode(self, T, varlength=None): + if self.with_stm(): + varlength = None if varlength is None: key = T else: @@ -87,7 +92,7 @@ elif T == WeakRef: REALT = self.gcpolicy.get_real_weakref_type() node = self.gettypedefnode(REALT) - elif isinstance(T, OpaqueType) and T.__name__ == "struct stm_object_s": + elif isinstance(T, OpaqueType) and T.hints.get("is_stm_header", False): from rpython.translator.stm.funcgen import StmHeaderOpaqueDefNode node = StmHeaderOpaqueDefNode(self, T) else: @@ -97,6 +102,8 @@ return node def gettype(self, T, varlength=None, who_asks=None, argnames=[]): + if self.with_stm(): + varlength = None if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] elif isinstance(T, Typedef): diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -428,12 +428,16 @@ cfiles = [self.c_source_filename] + self.extrafiles + list(module_files) if exe_name is not None: exe_name = targetdir.join(exe_name) + kwds = {} + if self.config.translation.stm: + kwds['cc'] = 'clang' # force the use of clang mk = self.translator.platform.gen_makefile( cfiles, self.eci, path=targetdir, exe_name=exe_name, headers_to_precompile=headers_to_precompile, no_precompile_cfiles = module_files, - shared=self.config.translation.shared) + shared=self.config.translation.shared, + **kwds) if self.has_profopt(): profopt = self.config.translation.profopt diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -43,6 +43,35 @@ Node.__init__(self, db) self.dependencies = set() + def make_full_type_name(self): + if self.db.with_stm() and self.LLTYPE._gckind == 'gc': + assert self.typetag == 'struct' + self.fulltypename = '%s_t @' % (self.name,) + if self.db.with_stm(): + tlprefix = ' TLPREFIX' + else: + tlprefix = '' + self.forward_decl = 'typedef%s struct %s %s_t;' % ( + tlprefix, self.name, self.name) + else: + self.fulltypename = '%s %s @' % (self.typetag, self.name) + + def getfieldtype(self, T, is_array=False): + if self.db.with_stm(): + if isinstance(T, GcStruct): + node = self.db.gettypedefnode(T) + self.dependencies.add(node) + return 'struct %s' % node.name + if 
isinstance(T, OpaqueType): + if T.hints.get("is_stm_header", False): + return 'struct object_s @' + if is_array: + varlength = self.varlength + else: + varlength = None + return self.db.gettype(T, varlength=self.varlength, who_asks=self) + + class StructDefNode(NodeWithDependencies): typetag = 'struct' extra_union_for_varlength = True @@ -83,7 +112,7 @@ assert self.fieldnames == ('typeptr',) self.fieldnames = () # - self.fulltypename = '%s %s @' % (self.typetag, self.name) + self.make_full_type_name() def setup(self): # this computes self.fields @@ -98,15 +127,11 @@ if needs_gcheader(self.STRUCT): HDR = db.gcpolicy.struct_gcheader_definition(self) if HDR is not None: - gc_field = ("_gcheader", db.gettype(HDR, who_asks=self)) + gc_field = ("_gcheader", self.getfieldtype(HDR)) self.fields.append(gc_field) for name in self.fieldnames: T = self.c_struct_field_type(name) - if name == STRUCT._arrayfld: - typename = db.gettype(T, varlength=self.varlength, - who_asks=self) - else: - typename = db.gettype(T, who_asks=self) + typename = self.getfieldtype(T, name==STRUCT._arrayfld) self.fields.append((self.c_struct_field_name(name), typename)) self.gcinfo # force it to be computed @@ -211,8 +236,8 @@ (self.barename, self.name) = db.namespace.uniquename(basename, with_number=with_number, bare=True) - self.fulltypename = '%s %s @' % (self.typetag, self.name) - self.fullptrtypename = '%s %s *@' % (self.typetag, self.name) + self.make_full_type_name() + self.fullptrtypename = self.fulltypename.replace('@', '*@') def setup(self): if hasattr(self, 'itemtypename'): @@ -225,7 +250,7 @@ if needs_gcheader(ARRAY): HDR = db.gcpolicy.array_gcheader_definition(self) if HDR is not None: - gc_field = ("_gcheader", db.gettype(HDR, who_asks=self)) + gc_field = ("_gcheader", self.getfieldtype(HDR)) self.gcfields.append(gc_field) self.itemtypename = db.gettype(ARRAY.OF, who_asks=self) @@ -494,8 +519,7 @@ def is_thread_local(self): T = self.getTYPE() return hasattr(T, "_hints") and (T._hints.get('thread_local') or ( - T._hints.get('stm_thread_local') and - self.db.translator.config.translation.stm)) + T._hints.get('stm_thread_local') and self.db.with_stm())) def compilation_info(self): return getattr(self.obj, self.eci_name, None) @@ -967,7 +991,7 @@ return db.gcpolicy.rtti_node_factory()(db, T, obj) if T.hints.get("render_structure", False): return ExtType_OpaqueNode(db, T, obj) - if T.__name__ == 'struct stm_object_s': + if T.hints.get("is_stm_header", False): from rpython.translator.stm.funcgen import StmHeader_OpaqueNode return StmHeader_OpaqueNode(db, T, obj) raise Exception("don't know about %r" % (T,)) diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -84,7 +84,7 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, shared=False, headers_to_precompile=[], - no_precompile_cfiles = []): + no_precompile_cfiles = [], cc=None): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -154,7 +154,7 @@ ('LDFLAGS', linkflags), ('LDFLAGS_LINK', list(self.link_flags)), ('LDFLAGSEXTRA', list(eci.link_extra)), - ('CC', self.cc), + ('CC', cc or self.cc), ('CC_LINK', eci.use_cpp_linker and 'g++' or '$(CC)'), ('LINKFILES', eci.link_files), ] diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -5,13 +5,13 @@ class StmHeaderOpaqueDefNode(Node): - typetag = 'struct' + typetag = '' 
dependencies = () def __init__(self, db, T): Node.__init__(self, db) self.T = T - self.name = 'stm_object_s' + self.name = 'object_t' def setup(self): pass @@ -26,7 +26,7 @@ class StmHeader_OpaqueNode(ContainerNode): nodekind = 'stmhdr' globalcontainer = True - typename = 'struct stm_object_s @' + typename = 'object_t @' implementationtypename = typename _funccodegen_owner = None @@ -37,9 +37,9 @@ self.obj = obj def initializationexpr(self, decoration=''): - yield '{ %s | PREBUILT_FLAGS, PREBUILT_REVISION, %dL }' % ( - name_small_integer(self.obj.typeid16, self.db), - self.obj.prebuilt_hash) + yield '{ { }, %s }' % ( + name_small_integer(self.obj.typeid16, self.db)) + # self.obj.prebuilt_hash def stm_initialize(funcgen, op): diff --git a/rpython/translator/stm/stmgcintf.py b/rpython/translator/stm/stmgcintf.py --- a/rpython/translator/stm/stmgcintf.py +++ b/rpython/translator/stm/stmgcintf.py @@ -32,6 +32,6 @@ separate_module_sources = [separate_source], ) -GCPTR = lltype.Ptr(rffi.COpaque('struct stm_object_s')) +GCPTR = lltype.Ptr(rffi.COpaque('object_t', hints={"is_stm_header": True})) CALLBACK_TX = lltype.Ptr(lltype.FuncType([GCPTR, rffi.INT_real], rffi.INT_real)) From noreply at buildbot.pypy.org Thu Mar 6 09:10:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 09:10:07 +0100 (CET) Subject: [pypy-commit] pypy default: typo Message-ID: <20140306081007.F21261C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69750:2186a2662299 Date: 2014-03-06 09:09 +0100 http://bitbucket.org/pypy/pypy/changeset/2186a2662299/ Log: typo diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -740,7 +740,7 @@ print >> f, "#endif" def gen_preimpl(f, database): - f.write('#ifndef _PY_PREIMPLE_H\n#define _PY_PREIMPL_H\n') + f.write('#ifndef _PY_PREIMPL_H\n#define _PY_PREIMPL_H\n') if database.translator is None or database.translator.rtyper is None: return preimplementationlines = pre_include_code_lines( From noreply at buildbot.pypy.org Thu Mar 6 10:02:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 10:02:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: More in-progress-ness Message-ID: <20140306090251.C04AD1C10A8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69751:1519a5a63ecf Date: 2014-03-06 10:02 +0100 http://bitbucket.org/pypy/pypy/changeset/1519a5a63ecf/ Log: More in-progress-ness diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -38,14 +38,6 @@ malloc_zero_filled = True #gcflag_extra = GCFLAG_EXTRA - GCHDR = lltype.Struct( - 'GCPTR', - ('h_tid', lltype.Unsigned), - ('h_revision', lltype.Signed), - ('h_original', lltype.Unsigned)) - GCHDRP = lltype.Ptr(GCHDR) - GCHDRSIZE = 3 * WORD - HDR = stmgcintf.GCPTR.TO H_TID = 0 H_REVISION = WORD diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -68,8 +68,6 @@ return self.translator.config.translation.stm def gettypedefnode(self, T, varlength=None): - if self.with_stm(): - varlength = None if varlength is None: key = T else: @@ -102,8 +100,6 @@ return node def gettype(self, T, varlength=None, who_asks=None, argnames=[]): - if self.with_stm(): - varlength = None if isinstance(T, Primitive) or T == GCREF: return PrimitiveType[T] elif isinstance(T, Typedef): diff --git 
a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -766,6 +766,13 @@ print >> f print >> f, "#ifndef _PYPY_STRUCTDEF_H" print >> f, "#define _PYPY_STRUCTDEF_H" + if database.with_stm(): + print >> f + print >> f, 'typedef TLPREFIX struct rpyobj_s {' + print >> f, '\tstruct object_s lib;' + print >> f, '\tuint32_t tid;' + print >> f, '} rpyobj_t;' + print >> f for node in structdeflist: if hasattr(node, 'forward_decl'): if node.forward_decl: @@ -790,7 +797,7 @@ print >> f, "#endif" def gen_preimpl(f, database): - f.write('#ifndef _PY_PREIMPLE_H\n#define _PY_PREIMPL_H\n') + f.write('#ifndef _PY_PREIMPL_H\n#define _PY_PREIMPL_H\n') if database.translator is None or database.translator.rtyper is None: return preimplementationlines = pre_include_code_lines( diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -43,33 +43,40 @@ Node.__init__(self, db) self.dependencies = set() + def make_normalizedtypename(self): + if self.varlength is not None: + assert self.typetag == 'struct' + self.normalizedtypename = self.db.gettype(self.LLTYPE, + who_asks=self) + if not self.normalizedtypename.startswith('struct '): + assert self.db.with_stm() + assert self.normalizedtypename.endswith('_t @') + self.normalizedtypename = 'struct %s @' % ( + self.normalizedtypename[:-4],) + def make_full_type_name(self): if self.db.with_stm() and self.LLTYPE._gckind == 'gc': assert self.typetag == 'struct' self.fulltypename = '%s_t @' % (self.name,) - if self.db.with_stm(): - tlprefix = ' TLPREFIX' - else: - tlprefix = '' - self.forward_decl = 'typedef%s struct %s %s_t;' % ( - tlprefix, self.name, self.name) + self.forward_decl = 'typedef TLPREFIX struct %s %s_t;' % ( + self.name, self.name) else: self.fulltypename = '%s %s @' % (self.typetag, self.name) def getfieldtype(self, T, is_array=False): + if is_array: + varlength = self.varlength + else: + varlength = None if self.db.with_stm(): if isinstance(T, GcStruct): - node = self.db.gettypedefnode(T) + node = self.db.gettypedefnode(T, varlength=varlength) self.dependencies.add(node) return 'struct %s' % node.name if isinstance(T, OpaqueType): if T.hints.get("is_stm_header", False): - return 'struct object_s @' - if is_array: - varlength = self.varlength - else: - varlength = None - return self.db.gettype(T, varlength=self.varlength, who_asks=self) + return 'struct rpyobj_s @' + return self.db.gettype(T, varlength=varlength, who_asks=self) class StructDefNode(NodeWithDependencies): @@ -122,8 +129,7 @@ self.fields = [] db = self.db STRUCT = self.STRUCT - if self.varlength is not None: - self.normalizedtypename = db.gettype(STRUCT, who_asks=self) + self.make_normalizedtypename() if needs_gcheader(self.STRUCT): HDR = db.gcpolicy.struct_gcheader_definition(self) if HDR is not None: @@ -245,8 +251,7 @@ db = self.db ARRAY = self.ARRAY self.gcinfo # force it to be computed - if self.varlength is not None: - self.normalizedtypename = db.gettype(ARRAY, who_asks=self) + self.make_normalizedtypename() if needs_gcheader(ARRAY): HDR = db.gcpolicy.array_gcheader_definition(self) if HDR is not None: @@ -527,9 +532,14 @@ def get_declaration(self): if self.name[-2:] == '.b': # xxx fish fish - assert self.implementationtypename.startswith('struct ') - assert self.implementationtypename.endswith(' @') - uniontypename = 'union %su @' % self.implementationtypename[7:-2] + if self.implementationtypename.startswith('struct 
'): + assert self.implementationtypename.endswith(' @') + uniontypename = 'union %su @'%self.implementationtypename[7:-2] + else: + assert self.implementationtypename.endswith('_t @') + uniontypename = 'union %su @'%self.implementationtypename[:-4] + if self.db.with_stm(): + uniontypename = 'TLPREFIX ' + uniontypename return uniontypename, self.name[:-2] else: return self.implementationtypename, self.name diff --git a/rpython/translator/stm/stmgcintf.py b/rpython/translator/stm/stmgcintf.py --- a/rpython/translator/stm/stmgcintf.py +++ b/rpython/translator/stm/stmgcintf.py @@ -14,11 +14,11 @@ extern Signed pypy_stmcb_size(void*); extern void pypy_stmcb_trace(void*, void(*)(void*)); -inline size_t stmcb_size(gcptr obj) { +inline size_t stmcb_size(struct object_s *obj) { return pypy_stmcb_size(obj); } -inline void stmcb_trace(gcptr obj, void visit(gcptr *)) { +inline void stmcb_trace(struct object_s *obj, void visit(object_t **)) { pypy_stmcb_trace(obj, (void(*)(void*))visit); } @@ -32,6 +32,7 @@ separate_module_sources = [separate_source], ) -GCPTR = lltype.Ptr(rffi.COpaque('object_t', hints={"is_stm_header": True})) +GCPTR = lltype.Ptr(rffi.COpaque('rpyobj_t', + hints={"is_stm_header": True})) CALLBACK_TX = lltype.Ptr(lltype.FuncType([GCPTR, rffi.INT_real], rffi.INT_real)) From noreply at buildbot.pypy.org Thu Mar 6 10:14:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 10:14:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix for a mixture of __thread prebuilt data and address_space stuff that may or may not be a bug in clang. Message-ID: <20140306091420.BBB1B1C315D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69752:9d10c1b98ba3 Date: 2014-03-06 10:13 +0100 http://bitbucket.org/pypy/pypy/changeset/9d10c1b98ba3/ Log: Fix for a mixture of __thread prebuilt data and address_space stuff that may or may not be a bug in clang. 
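As a rough illustration of what the one-line change in the diff below amounts to (names invented, not taken from the repository): the C backend used to emit a prebuilt NULL pointer as a cast to the full C pointer type; under the stmgc-c7 setup that type can carry the TLPREFIX / clang address_space qualifier seen in the earlier diffs, and the container can be __thread, which is the combination the log message refers to, so the fix emits a bare NULL instead. A minimal sketch, assuming a hypothetical helper name:

    # Hypothetical sketch only; the real change is the one-line edit to
    # LowLevelDatabase.get() in the diff that follows.
    def null_pointer_expr(c_pointer_type, cast_to_type):
        # c_pointer_type would be something like 'rpyobj_t *', i.e. a
        # pointer to a TLPREFIX (address_space-qualified) struct
        if cast_to_type:
            # old form: '((rpyobj_t *) NULL)' -- the explicit cast is what
            # reportedly trips clang for __thread prebuilt data
            return '((%s) NULL)' % (c_pointer_type,)
        # new form: a bare 'NULL', left to convert implicitly
        return 'NULL'

    assert null_pointer_expr('rpyobj_t *', cast_to_type=True) == '((rpyobj_t *) NULL)'
    assert null_pointer_expr('rpyobj_t *', cast_to_type=False) == 'NULL'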
diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -234,7 +234,8 @@ node._funccodegen_owner = funcgen return node.getptrname() else: - return '((%s) NULL)' % (cdecl(self.gettype(T), ''), ) + return 'NULL' + #return '((%s) NULL)' % (cdecl(self.gettype(T), ''), ) else: raise Exception("don't know about %r" % (obj,)) From noreply at buildbot.pypy.org Thu Mar 6 10:24:25 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 6 Mar 2014 10:24:25 +0100 (CET) Subject: [pypy-commit] stmgc default: fix gil-c7 with prebuilt objs Message-ID: <20140306092425.1E6CA1C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r965:70871e1c5615 Date: 2014-03-06 10:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/70871e1c5615/ Log: fix gil-c7 with prebuilt objs diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -339,7 +339,11 @@ td.num_roots_at_transaction_start = td.num_roots; - STM_START_TRANSACTION(&stm_thread_local, here); + if (get_rand(100) < 98) { + STM_START_TRANSACTION(&stm_thread_local, here); + } else { + stm_start_inevitable_transaction(&stm_thread_local); + } td.num_roots = td.num_roots_at_transaction_start; p = NULL; pop_roots(); diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c --- a/gil-c7/stmgc.c +++ b/gil-c7/stmgc.c @@ -138,7 +138,7 @@ char *p = malloc(size); assert(p); memset(p, 0, size); - ((object_t *)p)->gil_flags = STM_FLAGS_PREBUILT; + ((object_t *)p)->gil_flags = _STM_GCFLAG_WRITE_BARRIER; return (object_t *)p; } diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -102,3 +102,9 @@ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); + +inline static object_t *stm_setup_prebuilt(object_t *preb) +{ + preb->gil_flags |= _STM_GCFLAG_WRITE_BARRIER; + return preb; +} From noreply at buildbot.pypy.org Thu Mar 6 11:26:42 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 6 Mar 2014 11:26:42 +0100 (CET) Subject: [pypy-commit] pypy default: update project-ideas Message-ID: <20140306102642.DAD591C3373@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69753:270944ea9be9 Date: 2014-03-06 12:25 +0200 http://bitbucket.org/pypy/pypy/changeset/270944ea9be9/ Log: update project-ideas diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -145,18 +145,11 @@ Embedding PyPy and improving CFFI ---------------------------------------- -Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ +PyPy has some basic `embedding infrastructure`_. The idea would be to improve +upon that with cffi hacks that can automatically generate embeddable .so/.dll +library -Being able to embed PyPy, say with its own limited C API, would be -useful. But there is a possibly better variant: use CFFI. With some -minimal tools atop CFFI, it would be possible to write a pure Python -library, and then compile automatically from it an .so/.dll file that is -a dynamic-link library with whatever C API we want. This gives us a -one-size-fits-all generic way to make .so/.dll files from Python. - -This would fit well in a "redesign CFFI" work. - -.. _`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html +.. 
_`embedding infrastructure`: embedding.html Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- From noreply at buildbot.pypy.org Thu Mar 6 11:29:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 11:29:39 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Hack hack Message-ID: <20140306102939.ED6A61C3373@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69754:acd1f927bb36 Date: 2014-03-06 11:28 +0100 http://bitbucket.org/pypy/pypy/changeset/acd1f927bb36/ Log: Hack hack diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -191,19 +191,9 @@ # run-time teardown code for tests! gcdata.gc._teardown() - bk = self.translator.annotator.bookkeeper r_typeid16 = rffi.platform.numbertype_to_rclass[TYPE_ID] s_typeid16 = annmodel.SomeInteger(knowntype=r_typeid16) - # the point of this little dance is to not annotate - # self.gcdata.static_root_xyz as constants. XXX is it still needed?? - data_classdef = bk.getuniqueclassdef(gctypelayout.GCData) - data_classdef.generalize_attr('static_root_start', SomeAddress()) - data_classdef.generalize_attr('static_root_nongcend', SomeAddress()) - data_classdef.generalize_attr('static_root_end', SomeAddress()) - data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger()) - data_classdef.generalize_attr('typeids_z', SomeAddress()) - annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper) def getfn(ll_function, args_s, s_result, inline=False, @@ -227,6 +217,7 @@ self.weakref_deref_ptr = self.inittime_helper( ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address) + bk = self.translator.annotator.bookkeeper classdef = bk.getuniqueclassdef(GCClass) s_gc = annmodel.SomeInstance(classdef) @@ -272,6 +263,16 @@ from rpython.memory.gc.base import ARRAY_TYPEID_MAP from rpython.memory.gc import inspector + # the point of this little dance is to not annotate + # self.gcdata.static_root_xyz as constants. XXX is it still needed?? 
+ bk = self.translator.annotator.bookkeeper + data_classdef = bk.getuniqueclassdef(gctypelayout.GCData) + data_classdef.generalize_attr('static_root_start', SomeAddress()) + data_classdef.generalize_attr('static_root_nongcend', SomeAddress()) + data_classdef.generalize_attr('static_root_end', SomeAddress()) + data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger()) + data_classdef.generalize_attr('typeids_z', SomeAddress()) + s_gcref = SomePtr(llmemory.GCREF) gcdata = self.gcdata translator = self.translator diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -38,6 +38,10 @@ llannotation.SomePtr(GCClass.VISIT_FPTR)], annmodel.s_None)) + def finish_tables(self): + self.layoutbuilder.close_table() + return [] + def build_root_walker(self): return StmRootWalker(self) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -72,7 +72,7 @@ if isinstance(T, GcStruct): node = self.db.gettypedefnode(T, varlength=varlength) self.dependencies.add(node) - return 'struct %s' % node.name + return 'struct %s @' % node.name if isinstance(T, OpaqueType): if T.hints.get("is_stm_header", False): return 'struct rpyobj_s @' @@ -538,7 +538,7 @@ else: assert self.implementationtypename.endswith('_t @') uniontypename = 'union %su @'%self.implementationtypename[:-4] - if self.db.with_stm(): + if self.db.with_stm() and self.getTYPE()._gckind == 'gc': uniontypename = 'TLPREFIX ' + uniontypename return uniontypename, self.name[:-2] else: From noreply at buildbot.pypy.org Thu Mar 6 12:48:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 12:48:43 +0100 (CET) Subject: [pypy-commit] pypy default: Update to pycparser 2.10. Message-ID: <20140306114843.49C1C1D2761@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69755:89ca87cad356 Date: 2014-03-06 12:47 +0100 http://bitbucket.org/pypy/pypy/changeset/89ca87cad356/ Log: Update to pycparser 2.10. diff too long, truncating to 2000 out of 3044 lines diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py --- a/lib_pypy/cffi/_pycparser/__init__.py +++ b/lib_pypy/cffi/_pycparser/__init__.py @@ -1,14 +1,14 @@ #----------------------------------------------------------------- # pycparser: __init__.py # -# This package file exports some convenience functions for +# This package file exports some convenience functions for # interacting with pycparser # # Copyright (C) 2008-2012, Eli Bendersky # License: BSD #----------------------------------------------------------------- __all__ = ['c_lexer', 'c_parser', 'c_ast'] -__version__ = '2.09.1' +__version__ = '2.10' from subprocess import Popen, PIPE from .c_parser import CParser @@ -26,12 +26,12 @@ arguments. When successful, returns the preprocessed file's contents. - Errors from cpp will be printed out. + Errors from cpp will be printed out. 
""" path_list = [cpp_path] if isinstance(cpp_args, list): path_list += cpp_args - elif cpp_args != '': + elif cpp_args != '': path_list += [cpp_args] path_list += [filename] @@ -39,8 +39,8 @@ # Note the use of universal_newlines to treat all newlines # as \n for Python's purpose # - pipe = Popen( path_list, - stdout=PIPE, + pipe = Popen( path_list, + stdout=PIPE, universal_newlines=True) text = pipe.communicate()[0] except OSError as e: @@ -77,10 +77,10 @@ parser: Optional parser object to be used instead of the default CParser - When successful, an AST is returned. ParseError can be + When successful, an AST is returned. ParseError can be thrown if the file doesn't parse successfully. - Errors from cpp will be printed out. + Errors from cpp will be printed out. """ if use_cpp: text = preprocess_file(filename, cpp_path, cpp_args) diff --git a/lib_pypy/cffi/_pycparser/_build_tables.py b/lib_pypy/cffi/_pycparser/_build_tables.py --- a/lib_pypy/cffi/_pycparser/_build_tables.py +++ b/lib_pypy/cffi/_pycparser/_build_tables.py @@ -1,7 +1,7 @@ #----------------------------------------------------------------- # pycparser: _build_tables.py # -# A dummy for generating the lexing/parsing tables and and +# A dummy for generating the lexing/parsing tables and and # compiling them into .pyc for faster execution in optimized mode. # Also generates AST code from the configuration file. # Should be called from the pycparser directory. @@ -17,14 +17,14 @@ ast_gen.generate(open('c_ast.py', 'w')) import sys -sys.path.extend(['.', '..']) +sys.path[0:0] = ['.', '..'] from pycparser import c_parser # Generates the tables # c_parser.CParser( - lex_optimize=True, - yacc_debug=False, + lex_optimize=True, + yacc_debug=False, yacc_optimize=True) # Load to compile into .pyc diff --git a/lib_pypy/cffi/_pycparser/_c_ast.cfg b/lib_pypy/cffi/_pycparser/_c_ast.cfg --- a/lib_pypy/cffi/_pycparser/_c_ast.cfg +++ b/lib_pypy/cffi/_pycparser/_c_ast.cfg @@ -29,7 +29,7 @@ Cast: [to_type*, expr*] -# Compound statement in C99 is a list of block items (declarations or +# Compound statement in C99 is a list of block items (declarations or # statements). # Compound: [block_items**] @@ -37,7 +37,7 @@ # Compound literal (anonymous aggregate) for C99. # (type-name) {initializer_list} # type: the typename -# init: InitExprList for the initializer list +# init: InitList for the initializer list # CompoundLiteral: [type*, init*] diff --git a/lib_pypy/cffi/_pycparser/c_generator.py b/lib_pypy/cffi/_pycparser/c_generator.py --- a/lib_pypy/cffi/_pycparser/c_generator.py +++ b/lib_pypy/cffi/_pycparser/c_generator.py @@ -11,34 +11,34 @@ class CGenerator(object): """ Uses the same visitor pattern as c_ast.NodeVisitor, but modified to - return a value from each visit method, using string accumulation in + return a value from each visit method, using string accumulation in generic_visit. 
""" def __init__(self): self.output = '' - + # Statements start with indentation of self.indent_level spaces, using # the _make_indent method # self.indent_level = 0 - + def _make_indent(self): return ' ' * self.indent_level - + def visit(self, node): method = 'visit_' + node.__class__.__name__ return getattr(self, method, self.generic_visit)(node) - + def generic_visit(self, node): #~ print('generic:', type(node)) if node is None: return '' else: return ''.join(self.visit(c) for c in node.children()) - + def visit_Constant(self, n): return n.value - + def visit_ID(self, n): return n.name @@ -61,22 +61,22 @@ elif n.op == 'p--': return '%s--' % operand elif n.op == 'sizeof': - # Always parenthesize the argument of sizeof since it can be + # Always parenthesize the argument of sizeof since it can be # a name. return 'sizeof(%s)' % self.visit(n.expr) else: return '%s%s' % (n.op, operand) def visit_BinaryOp(self, n): - lval_str = self._parenthesize_if(n.left, + lval_str = self._parenthesize_if(n.left, lambda d: not self._is_simple_node(d)) - rval_str = self._parenthesize_if(n.right, + rval_str = self._parenthesize_if(n.right, lambda d: not self._is_simple_node(d)) return '%s %s %s' % (lval_str, n.op, rval_str) def visit_Assignment(self, n): rval_str = self._parenthesize_if( - n.rvalue, + n.rvalue, lambda n: isinstance(n, c_ast.Assignment)) return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str) @@ -101,7 +101,7 @@ def visit_DeclList(self, n): s = self.visit(n.decls[0]) if len(n.decls) > 1: - s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) + s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) for decl in n.decls[1:]) return s @@ -112,7 +112,7 @@ return s def visit_Cast(self, n): - s = '(' + self._generate_type(n.to_type) + ')' + s = '(' + self._generate_type(n.to_type) + ')' return s + ' ' + self._parenthesize_unless_simple(n.expr) def visit_ExprList(self, n): @@ -127,8 +127,10 @@ def visit_InitList(self, n): visited_subexprs = [] for expr in n.exprs: - if isinstance(expr, c_ast.InitList): + if isinstance(expr, c_ast.ExprList): visited_subexprs.append('(' + self.visit(expr) + ')') + elif isinstance(expr, c_ast.InitList): + visited_subexprs.append('{' + self.visit(expr) + '}') else: visited_subexprs.append(self.visit(expr)) return ', '.join(visited_subexprs) @@ -140,9 +142,9 @@ s += ' {' for i, enumerator in enumerate(n.values.enumerators): s += enumerator.name - if enumerator.value: + if enumerator.value: s += ' = ' + self.visit(enumerator.value) - if i != len(n.values.enumerators) - 1: + if i != len(n.values.enumerators) - 1: s += ', ' s += '}' return s @@ -203,7 +205,7 @@ if n.cond: s += self.visit(n.cond) s += ')\n' s += self._generate_stmt(n.iftrue, add_indent=True) - if n.iffalse: + if n.iffalse: s += self._make_indent() + 'else\n' s += self._generate_stmt(n.iffalse, add_indent=True) return s @@ -265,7 +267,7 @@ def visit_Typename(self, n): return self._generate_type(n.type) - + def visit_Union(self, n): return self._generate_struct_union(n, 'union') @@ -280,13 +282,13 @@ return s def _generate_struct_union(self, n, name): - """ Generates code for structs and unions. name should be either + """ Generates code for structs and unions. name should be either 'struct' or union. """ s = name + ' ' + (n.name or '') if n.decls: s += '\n' - s += self._make_indent() + s += self._make_indent() self.indent_level += 2 s += '{\n' for decl in n.decls: @@ -297,25 +299,26 @@ def _generate_stmt(self, n, add_indent=False): """ Generation from a statement node. 
This method exists as a wrapper - for individual visit_* methods to handle different treatment of + for individual visit_* methods to handle different treatment of some statements in this context. """ typ = type(n) if add_indent: self.indent_level += 2 indent = self._make_indent() if add_indent: self.indent_level -= 2 - - if typ in ( + + if typ in ( c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp, c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef, - c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef): + c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef, + c_ast.ExprList): # These can also appear in an expression context so no semicolon # is added to them automatically # return indent + self.visit(n) + ';\n' elif typ in (c_ast.Compound,): - # No extra indentation required before the opening brace of a - # compound - because it consists of multiple lines it has to + # No extra indentation required before the opening brace of a + # compound - because it consists of multiple lines it has to # compute its own indentation. # return self.visit(n) @@ -330,21 +333,21 @@ if n.storage: s += ' '.join(n.storage) + ' ' s += self._generate_type(n.type) return s - + def _generate_type(self, n, modifiers=[]): - """ Recursive generation from a type node. n is the type node. - modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers + """ Recursive generation from a type node. n is the type node. + modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers encountered on the way down to a TypeDecl, to allow proper generation from it. """ typ = type(n) #~ print(n, modifiers) - + if typ == c_ast.TypeDecl: s = '' if n.quals: s += ' '.join(n.quals) + ' ' s += self.visit(n.type) - + nstr = n.declname if n.declname else '' # Resolve modifiers. # Wrap in parens to distinguish pointer to array and pointer to @@ -396,7 +399,7 @@ """ Returns True for nodes that are "simple" - i.e. nodes that always have higher precedence than operators. """ - return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef, + return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef, c_ast.StructRef, c_ast.FuncCall)) diff --git a/lib_pypy/cffi/_pycparser/c_lexer.py b/lib_pypy/cffi/_pycparser/c_lexer.py --- a/lib_pypy/cffi/_pycparser/c_lexer.py +++ b/lib_pypy/cffi/_pycparser/c_lexer.py @@ -1,11 +1,11 @@ +#------------------------------------------------------------------------------ # pycparser: c_lexer.py # # CLexer class: lexer for the C language # -# Copyright (C) 2008-2011, Eli Bendersky +# Copyright (C) 2008-2013, Eli Bendersky # License: BSD -#----------------------------------------------------------------- - +#------------------------------------------------------------------------------ import re import sys @@ -15,41 +15,50 @@ class CLexer(object): """ A lexer for the C language. After building it, set the - input text with input(), and call token() to get new + input text with input(), and call token() to get new tokens. - + The public attribute filename can be set to an initial - filaneme, but the lexer will update it upon #line + filaneme, but the lexer will update it upon #line directives. """ - def __init__(self, error_func, type_lookup_func): + def __init__(self, error_func, on_lbrace_func, on_rbrace_func, + type_lookup_func): """ Create a new Lexer. - + error_func: An error function. Will be called with an error - message, line and column as arguments, in case of + message, line and column as arguments, in case of an error during lexing. 
- + + on_lbrace_func, on_rbrace_func: + Called when an LBRACE or RBRACE is encountered + (likely to push/pop type_lookup_func's scope) + type_lookup_func: A type lookup function. Given a string, it must return True IFF this string is a name of a type that was defined with a typedef earlier. """ self.error_func = error_func + self.on_lbrace_func = on_lbrace_func + self.on_rbrace_func = on_rbrace_func self.type_lookup_func = type_lookup_func self.filename = '' - + + # Keeps track of the last token returned from self.token() + self.last_token = None + # Allow either "# line" or "# " to support GCC's # cpp output # self.line_pattern = re.compile('([ \t]*line\W)|([ \t]*\d+)') - self.pragma_pattern = re.compile('[ \t]*pragma\W') def build(self, **kwargs): """ Builds the lexer from the specification. Must be - called after the lexer object is created. - + called after the lexer object is created. + This method exists separately, because the PLY manual warns against calling lex.lex inside __init__ @@ -63,10 +72,10 @@ def input(self, text): self.lexer.input(text) - + def token(self): - g = self.lexer.token() - return g + self.last_token = self.lexer.token() + return self.last_token def find_tok_column(self, token): """ Find the column of the token in its line. @@ -75,7 +84,7 @@ return token.lexpos - last_cr ######################-- PRIVATE --###################### - + ## ## Internal auxiliary methods ## @@ -83,10 +92,10 @@ location = self._make_tok_location(token) self.error_func(msg, location[0], location[1]) self.lexer.skip(1) - + def _make_tok_location(self, token): return (token.lineno, self.find_tok_column(token)) - + ## ## Reserved keywords ## @@ -113,35 +122,35 @@ ## tokens = keywords + ( # Identifiers - 'ID', - - # Type identifiers (identifiers previously defined as + 'ID', + + # Type identifiers (identifiers previously defined as # types with typedef) 'TYPEID', - - # constants + + # constants 'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'FLOAT_CONST', 'HEX_FLOAT_CONST', 'CHAR_CONST', 'WCHAR_CONST', - + # String literals 'STRING_LITERAL', 'WSTRING_LITERAL', - # Operators + # Operators 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', 'LOR', 'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', - + # Assignment - 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', + 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', - 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', + 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', - # Increment/decrement + # Increment/decrement 'PLUSPLUS', 'MINUSMINUS', # Structure dereference (->) @@ -149,18 +158,18 @@ # Conditional operator (?) 'CONDOP', - - # Delimeters + + # Delimeters 'LPAREN', 'RPAREN', # ( ) 'LBRACKET', 'RBRACKET', # [ ] - 'LBRACE', 'RBRACE', # { } + 'LBRACE', 'RBRACE', # { } 'COMMA', 'PERIOD', # . , 'SEMI', 'COLON', # ; : # Ellipsis (...) 'ELLIPSIS', - - # pre-processor + + # pre-processor 'PPHASH', # '#' ) @@ -169,18 +178,18 @@ ## ## - # valid C identifiers (K&R2: A.2.3) - identifier = r'[a-zA-Z_][0-9a-zA-Z_]*' + # valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers) + identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*' hex_prefix = '0[xX]' hex_digits = '[0-9a-fA-F]+' # integer constants (K&R2: A.2.5.1) - integer_suffix_opt = r'(u?ll|U?LL|([uU][lL])|([lL][uU])|[uU]|[lL])?' + integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?' 
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')' octal_constant = '0[0-7]*'+integer_suffix_opt hex_constant = hex_prefix+hex_digits+integer_suffix_opt - + bad_octal_constant = '0[0-7]*[89]' # character constants (K&R2: A.2.5.2) @@ -196,14 +205,14 @@ bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])""" escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))' - cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' + cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' char_const = "'"+cconst_char+"'" wchar_const = 'L'+char_const unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)" bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')""" # string literals (K&R2: A.2.6) - string_char = r"""([^"\\\n]|"""+escape_sequence+')' + string_char = r"""([^"\\\n]|"""+escape_sequence+')' string_literal = '"'+string_char+'*"' wstring_literal = 'L'+string_literal bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"' @@ -221,14 +230,14 @@ ## states = ( # ppline: preprocessor line directives - # + # ('ppline', 'exclusive'), # pppragma: pragma # ('pppragma', 'exclusive'), ) - + def t_PPHASH(self, t): r'[ \t]*\#' if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos): @@ -239,7 +248,7 @@ else: t.type = 'PPHASH' return t - + ## ## Rules for the ppline state ## @@ -261,21 +270,21 @@ def t_ppline_NEWLINE(self, t): r'\n' - + if self.pp_line is None: self._error('line number missing in #line', t) else: self.lexer.lineno = int(self.pp_line) - + if self.pp_filename is not None: self.filename = self.pp_filename - + t.lexer.begin('INITIAL') def t_ppline_PPLINE(self, t): r'line' pass - + t_ppline_ignore = ' \t' def t_ppline_error(self, t): @@ -292,7 +301,7 @@ def t_pppragma_PPPRAGMA(self, t): r'pragma' pass - + t_pppragma_ignore = ' \t<>.-{}();+-*/$%@&^~!?:,0123456789' @TOKEN(string_literal) @@ -364,17 +373,36 @@ t_RPAREN = r'\)' t_LBRACKET = r'\[' t_RBRACKET = r'\]' - t_LBRACE = r'\{' - t_RBRACE = r'\}' t_COMMA = r',' t_PERIOD = r'\.' t_SEMI = r';' t_COLON = r':' t_ELLIPSIS = r'\.\.\.' - t_STRING_LITERAL = string_literal - - # The following floating and integer constants are defined as + # Scope delimiters + # To see why on_lbrace_func is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # TT x = 5; + # Outside the function, TT is a typedef, but inside (starting and ending + # with the braces) it's a parameter. The trouble begins with yacc's + # lookahead token. If we open a new scope in brace_open, then TT has + # already been read and incorrectly interpreted as TYPEID. So, we need + # to open and close scopes from within the lexer. + # Similar for the TT immediately outside the end of the function. 
+ # + @TOKEN(r'\{') + def t_LBRACE(self, t): + self.on_lbrace_func() + return t + @TOKEN(r'\}') + def t_RBRACE(self, t): + self.on_rbrace_func() + return t + + t_STRING_LITERAL = string_literal + + # The following floating and integer constants are defined as # functions to impose a strict order (otherwise, decimal # is placed before the others because its regex is longer, # and this is bad) @@ -404,17 +432,17 @@ def t_INT_CONST_DEC(self, t): return t - # Must come before bad_char_const, to prevent it from + # Must come before bad_char_const, to prevent it from # catching valid char constants as invalid - # + # @TOKEN(char_const) def t_CHAR_CONST(self, t): return t - + @TOKEN(wchar_const) def t_WCHAR_CONST(self, t): return t - + @TOKEN(unmatched_quote) def t_UNMATCHED_QUOTE(self, t): msg = "Unmatched '" @@ -428,12 +456,12 @@ @TOKEN(wstring_literal) def t_WSTRING_LITERAL(self, t): return t - + # unmatched string literals are caught by the preprocessor - + @TOKEN(bad_string_literal) def t_BAD_STRING_LITERAL(self, t): - msg = "String contains invalid escape code" + msg = "String contains invalid escape code" self._error(msg, t) @TOKEN(identifier) @@ -442,40 +470,8 @@ if t.type == 'ID' and self.type_lookup_func(t.value): t.type = "TYPEID" return t - + def t_error(self, t): msg = 'Illegal character %s' % repr(t.value[0]) self._error(msg, t) - -if __name__ == "__main__": - filename = '../zp.c' - text = open(filename).read() - - #~ text = '"'+r"""ka \p ka"""+'"' - text = r""" - 546 - #line 66 "kwas\df.h" - id 4 - # 5 - dsf - """ - - def errfoo(msg, a, b): - sys.write(msg + "\n") - sys.exit() - - def typelookup(namd): - return False - - clex = CLexer(errfoo, typelookup) - clex.build() - clex.input(text) - - while 1: - tok = clex.token() - if not tok: break - - printme([tok.value, tok.type, tok.lineno, clex.filename, tok.lexpos]) - - diff --git a/lib_pypy/cffi/_pycparser/c_parser.py b/lib_pypy/cffi/_pycparser/c_parser.py --- a/lib_pypy/cffi/_pycparser/c_parser.py +++ b/lib_pypy/cffi/_pycparser/c_parser.py @@ -3,7 +3,7 @@ # # CParser class: Parser and AST builder for the C language # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2013, Eli Bendersky # License: BSD #------------------------------------------------------------------------------ import re @@ -16,64 +16,66 @@ from .ast_transforms import fix_switch_cases -class CParser(PLYParser): +class CParser(PLYParser): def __init__( - self, + self, lex_optimize=True, - lextab='cffi._pycparser.lextab', + lextab='pycparser.lextab', yacc_optimize=True, - yacctab='cffi._pycparser.yacctab', + yacctab='pycparser.yacctab', yacc_debug=False): """ Create a new CParser. - + Some arguments for controlling the debug/optimization - level of the parser are provided. The defaults are - tuned for release/performance mode. + level of the parser are provided. The defaults are + tuned for release/performance mode. The simple rules for using them are: *) When tweaking CParser/CLexer, set these to False *) When releasing a stable parser, set to True - + lex_optimize: Set to False when you're modifying the lexer. Otherwise, changes in the lexer won't be used, if some lextab.py file exists. When releasing with a stable lexer, set to True - to save the re-generation of the lexer table on + to save the re-generation of the lexer table on each run. - + lextab: Points to the lex table that's used for optimized mode. 
Only if you're modifying the lexer and want - some tests to avoid re-generating the table, make + some tests to avoid re-generating the table, make this point to a local lex table file (that's been earlier generated with lex_optimize=True) - + yacc_optimize: Set to False when you're modifying the parser. Otherwise, changes in the parser won't be used, if some parsetab.py file exists. When releasing with a stable parser, set to True - to save the re-generation of the parser table on + to save the re-generation of the parser table on each run. - + yacctab: Points to the yacc table that's used for optimized - mode. Only if you're modifying the parser, make + mode. Only if you're modifying the parser, make this point to a local yacc table file - + yacc_debug: Generate a parser.out file that explains how yacc built the parsing table from the grammar. """ self.clex = CLexer( error_func=self._lex_error_func, + on_lbrace_func=self._lex_on_lbrace_func, + on_rbrace_func=self._lex_on_rbrace_func, type_lookup_func=self._lex_type_lookup_func) - + self.clex.build( optimize=lex_optimize, lextab=lextab) self.tokens = self.clex.tokens - + rules_with_opt = [ 'abstract_declarator', 'assignment_expression', @@ -89,74 +91,118 @@ 'type_qualifier_list', 'struct_declarator_list' ] - + for rule in rules_with_opt: self._create_opt_rule(rule) - + self.cparser = yacc.yacc( - module=self, + module=self, start='translation_unit_or_empty', debug=yacc_debug, optimize=yacc_optimize, tabmodule=yacctab) - - # Stack of scopes for keeping track of typedefs. _scope_stack[-1] is - # the current (topmost) scope. - # - self._scope_stack = [set()] - + + # Stack of scopes for keeping track of symbols. _scope_stack[-1] is + # the current (topmost) scope. Each scope is a dictionary that + # specifies whether a name is a type. If _scope_stack[n][name] is + # True, 'name' is currently a type in the scope. If it's False, + # 'name' is used in the scope but not as a type (for instance, if we + # saw: int name; + # If 'name' is not a key in _scope_stack[n] then 'name' was not defined + # in this scope at all. + self._scope_stack = [dict()] + + # Keeps track of the last token given to yacc (the lookahead token) + self._last_yielded_token = None + def parse(self, text, filename='', debuglevel=0): """ Parses C code and returns an AST. 
- + text: A string containing the C source code - + filename: Name of the file being parsed (for meaningful error messages) - + debuglevel: Debug level to yacc """ self.clex.filename = filename self.clex.reset_lineno() - self._scope_stack = [set()] - return self.cparser.parse(text, lexer=self.clex, debug=debuglevel) - + self._scope_stack = [dict()] + self._last_yielded_token = None + return self.cparser.parse( + input=text, + lexer=self.clex, + debug=debuglevel) + ######################-- PRIVATE --###################### - + def _push_scope(self): - self._scope_stack.append(set()) + self._scope_stack.append(dict()) def _pop_scope(self): assert len(self._scope_stack) > 1 self._scope_stack.pop() - def _add_typedef_type(self, name): - """ Add a new typedef-name to the current scope + def _add_typedef_name(self, name, coord): + """ Add a new typedef name (ie a TYPEID) to the current scope """ - self._scope_stack[-1].add(name) - #~ print(self._scope_stack) + if not self._scope_stack[-1].get(name, True): + self._parse_error( + "Typedef %r previously declared as non-typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = True + + def _add_identifier(self, name, coord): + """ Add a new object, function, or enum member name (ie an ID) to the + current scope + """ + if self._scope_stack[-1].get(name, False): + self._parse_error( + "Non-typedef %r previously declared as typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = False def _is_type_in_scope(self, name): """ Is *name* a typedef-name in the current scope? """ - return any(name in scope for scope in self._scope_stack) + for scope in reversed(self._scope_stack): + # If name is an identifier in this scope it shadows typedefs in + # higher scopes. + in_scope = scope.get(name) + if in_scope is not None: return in_scope + return False def _lex_error_func(self, msg, line, column): self._parse_error(msg, self._coord(line, column)) - + + def _lex_on_lbrace_func(self): + self._push_scope() + + def _lex_on_rbrace_func(self): + self._pop_scope() + def _lex_type_lookup_func(self, name): """ Looks up types that were previously defined with - typedef. + typedef. Passed to the lexer for recognizing identifiers that are types. """ - return self._is_type_in_scope(name) - - # To understand what's going on here, read sections A.8.5 and + is_type = self._is_type_in_scope(name) + return is_type + + def _get_yacc_lookahead_token(self): + """ We need access to yacc's lookahead token in certain cases. + This is the last token yacc requested from the lexer, so we + ask the lexer. + """ + return self.clex.last_token + + # To understand what's going on here, read sections A.8.5 and # A.8.6 of K&R2 very carefully. - # + # # A C type consists of a basic type declaration, with a list # of modifiers. For example: # @@ -166,7 +212,7 @@ # the array are the modifiers. # # Basic declarations are represented by TypeDecl (from module - # c_ast) and the modifiers are FuncDecl, PtrDecl and + # c_ast) and the modifiers are FuncDecl, PtrDecl and # ArrayDecl. 
# # The standard states that whenever a new modifier is parsed, @@ -175,41 +221,41 @@ # # K&R2 A.8.6.2: Array Declarators # - # In a declaration T D where D has the form - # D1 [constant-expression-opt] - # and the type of the identifier in the declaration T D1 is - # "type-modifier T", the type of the + # In a declaration T D where D has the form + # D1 [constant-expression-opt] + # and the type of the identifier in the declaration T D1 is + # "type-modifier T", the type of the # identifier of D is "type-modifier array of T" # # This is what this method does. The declarator it receives - # can be a list of declarators ending with TypeDecl. It - # tacks the modifier to the end of this list, just before + # can be a list of declarators ending with TypeDecl. It + # tacks the modifier to the end of this list, just before # the TypeDecl. # - # Additionally, the modifier may be a list itself. This is + # Additionally, the modifier may be a list itself. This is # useful for pointers, that can come as a chain from the rule - # p_pointer. In this case, the whole modifier list is spliced + # p_pointer. In this case, the whole modifier list is spliced # into the new location. # def _type_modify_decl(self, decl, modifier): """ Tacks a type modifier on a declarator, and returns the modified declarator. - + Note: the declarator and modifier may be modified """ #~ print '****' #~ decl.show(offset=3) #~ modifier.show(offset=3) #~ print '****' - + modifier_head = modifier modifier_tail = modifier - + # The modifier may be a nested list. Reach its tail. # - while modifier_tail.type: + while modifier_tail.type: modifier_tail = modifier_tail.type - + # If the decl is a basic type, just tack the modifier onto # it # @@ -222,29 +268,29 @@ # pointing to the underlying basic type. # decl_tail = decl - + while not isinstance(decl_tail.type, c_ast.TypeDecl): decl_tail = decl_tail.type - + modifier_tail.type = decl_tail.type decl_tail.type = modifier_head return decl # Due to the order in which declarators are constructed, # they have to be fixed in order to look like a normal AST. - # + # # When a declaration arrives from syntax construction, it has # these problems: # * The innermost TypeDecl has no type (because the basic # type is only known at the uppermost declaration level) # * The declaration has no variable name, since that is saved # in the innermost TypeDecl - # * The typename of the declaration is a list of type + # * The typename of the declaration is a list of type # specifiers, and not a node. Here, basic identifier types # should be separated from more complex types like enums # and structs. # - # This method fixes these problem. + # This method fixes these problems. # def _fix_decl_name_type(self, decl, typename): """ Fixes a declaration. Modifies decl. @@ -254,13 +300,13 @@ type = decl while not isinstance(type, c_ast.TypeDecl): type = type.type - + decl.name = type.declname type.quals = decl.quals - - # The typename is a list of types. If any type in this + + # The typename is a list of types. If any type in this # list isn't an IdentifierType, it must be the only - # type in the list (it's illegal to declare "int enum .." + # type in the list (it's illegal to declare "int enum ..") # If all the types are basic, they're collected in the # IdentifierType holder. # @@ -272,14 +318,25 @@ else: type.type = tn return decl - - # At this point, we know that typename is a list of IdentifierType - # nodes. Concatenate all the names into a single list. 
- type.type = c_ast.IdentifierType( - [name for id in typename for name in id.names], - coord=typename[0].coord) + + if not typename: + # Functions default to returning int + # + if not isinstance(decl.type, c_ast.FuncDecl): + self._parse_error( + "Missing type in declaration", decl.coord) + type.type = c_ast.IdentifierType( + ['int'], + coord=decl.coord) + else: + # At this point, we know that typename is a list of IdentifierType + # nodes. Concatenate all the names into a single list. + # + type.type = c_ast.IdentifierType( + [name for id in typename for name in id.names], + coord=typename[0].coord) return decl - + def _add_declaration_specifier(self, declspec, newspec, kind): """ Declaration specifiers are represented by a dictionary with the entries: @@ -287,31 +344,115 @@ * storage: a list of storage type qualifiers * type: a list of type specifiers * function: a list of function specifiers - - This method is given a declaration specifier, and a + + This method is given a declaration specifier, and a new specifier of a given kind. - Returns the declaration specifier, with the new + Returns the declaration specifier, with the new specifier incorporated. """ spec = declspec or dict(qual=[], storage=[], type=[], function=[]) spec[kind].insert(0, newspec) return spec - - def _build_function_definition(self, decl, spec, param_decls, body): + + def _build_declarations(self, spec, decls, typedef_namespace=False): + """ Builds a list of declarations all sharing the given specifiers. + If typedef_namespace is true, each declared name is added + to the "typedef namespace", which also includes objects, + functions, and enum constants. + """ + is_typedef = 'typedef' in spec['storage'] + declarations = [] + + # Bit-fields are allowed to be unnamed. + # + if decls[0].get('bitsize') is not None: + pass + + # When redeclaring typedef names as identifiers in inner scopes, a + # problem can occur where the identifier gets grouped into + # spec['type'], leaving decl as None. This can only occur for the + # first declarator. + # + elif decls[0]['decl'] is None: + if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \ + not self._is_type_in_scope(spec['type'][-1].names[0]): + coord = '?' + for t in spec['type']: + if hasattr(t, 'coord'): + coord = t.coord + break + self._parse_error('Invalid declaration', coord) + + # Make this look as if it came from "direct_declarator:ID" + decls[0]['decl'] = c_ast.TypeDecl( + declname=spec['type'][-1].names[0], + type=None, + quals=None, + coord=spec['type'][-1].coord) + # Remove the "new" type's name from the end of spec['type'] + del spec['type'][-1] + + # A similar problem can occur where the declaration ends up looking + # like an abstract declarator. Give it a name if this is the case. 
+ # + elif not isinstance(decls[0]['decl'], + (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): + decls_0_tail = decls[0]['decl'] + while not isinstance(decls_0_tail, c_ast.TypeDecl): + decls_0_tail = decls_0_tail.type + if decls_0_tail.declname is None: + decls_0_tail.declname = spec['type'][-1].names[0] + del spec['type'][-1] + + for decl in decls: + assert decl['decl'] is not None + if is_typedef: + declaration = c_ast.Typedef( + name=None, + quals=spec['qual'], + storage=spec['storage'], + type=decl['decl'], + coord=decl['decl'].coord) + else: + declaration = c_ast.Decl( + name=None, + quals=spec['qual'], + storage=spec['storage'], + funcspec=spec['function'], + type=decl['decl'], + init=decl.get('init'), + bitsize=decl.get('bitsize'), + coord=decl['decl'].coord) + + if isinstance(declaration.type, + (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): + fixed_decl = declaration + else: + fixed_decl = self._fix_decl_name_type(declaration, spec['type']) + + # Add the type name defined by typedef to a + # symbol table (for usage in the lexer) + # + if typedef_namespace: + if is_typedef: + self._add_typedef_name(fixed_decl.name, fixed_decl.coord) + else: + self._add_identifier(fixed_decl.name, fixed_decl.coord) + + declarations.append(fixed_decl) + + return declarations + + def _build_function_definition(self, spec, decl, param_decls, body): """ Builds a function definition. """ - declaration = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=None, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] - declaration = self._fix_decl_name_type(declaration, typename) + assert 'typedef' not in spec['storage'] + + declaration = self._build_declarations( + spec=spec, + decls=[dict(decl=decl, init=None)], + typedef_namespace=True)[0] + return c_ast.FuncDef( decl=declaration, param_decls=param_decls, @@ -361,29 +502,29 @@ p[0] = c_ast.FileAST(p[1]) def p_translation_unit_1(self, p): - """ translation_unit : external_declaration + """ translation_unit : external_declaration """ # Note: external_declaration is already a list # p[0] = p[1] - + def p_translation_unit_2(self, p): """ translation_unit : translation_unit external_declaration """ if p[2] is not None: p[1].extend(p[2]) p[0] = p[1] - + # Declarations always come as lists (because they can be - # several in one line), so we wrap the function definition - # into a list as well, to make the return value of + # several in one line), so we wrap the function definition + # into a list as well, to make the return value of # external_declaration homogenous. 
# def p_external_declaration_1(self, p): """ external_declaration : function_definition """ p[0] = [p[1]] - + def p_external_declaration_2(self, p): """ external_declaration : declaration """ @@ -393,16 +534,16 @@ """ external_declaration : pp_directive """ p[0] = p[1] - + def p_external_declaration_4(self, p): """ external_declaration : SEMI """ p[0] = None def p_pp_directive(self, p): - """ pp_directive : PPHASH + """ pp_directive : PPHASH """ - self._parse_error('Directives not supported yet', + self._parse_error('Directives not supported yet', self._coord(p.lineno(1))) # In function definitions, the declarator can be followed by @@ -411,32 +552,37 @@ def p_function_definition_1(self, p): """ function_definition : declarator declaration_list_opt compound_statement """ - # no declaration specifiers - spec = dict(qual=[], storage=[], type=[]) + # no declaration specifiers - 'int' becomes the default type + spec = dict( + qual=[], + storage=[], + type=[c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))], + function=[]) p[0] = self._build_function_definition( + spec=spec, decl=p[1], - spec=spec, param_decls=p[2], body=p[3]) - + def p_function_definition_2(self, p): """ function_definition : declaration_specifiers declarator declaration_list_opt compound_statement """ spec = p[1] p[0] = self._build_function_definition( + spec=spec, decl=p[2], - spec=spec, param_decls=p[3], body=p[4]) - + def p_statement(self, p): """ statement : labeled_statement | expression_statement | compound_statement | selection_statement - | iteration_statement + | iteration_statement | jump_statement """ p[0] = p[1] @@ -454,66 +600,43 @@ """ decl_body : declaration_specifiers init_declarator_list_opt """ spec = p[1] - is_typedef = 'typedef' in spec['storage'] - decls = [] - + # p[2] (init_declarator_list_opt) is either a list or None # if p[2] is None: - # Then it's a declaration of a struct / enum tag, - # without an actual declarator. + # By the standard, you must have at least one declarator unless + # declaring a structure tag, a union tag, or the members of an + # enumeration. # ty = spec['type'] - if len(ty) > 1: - coord = '?' - for t in ty: - if hasattr(t, 'coord'): - coord = t.coord - break - - self._parse_error('Multiple type specifiers with a type tag', - coord) - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=ty[0], - init=None, - bitsize=None, - coord=ty[0].coord) - decls = [decl] + s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum) + if len(ty) == 1 and isinstance(ty[0], s_u_or_e): + decls = [c_ast.Decl( + name=None, + quals=spec['qual'], + storage=spec['storage'], + funcspec=spec['function'], + type=ty[0], + init=None, + bitsize=None, + coord=ty[0].coord)] + + # However, this case can also occur on redeclared identifiers in + # an inner scope. The trouble is that the redeclared type's name + # gets grouped into declaration_specifiers; _build_declarations + # compensates for this. 
+ # + else: + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)], + typedef_namespace=True) + else: - for decl, init in p[2] or []: - if is_typedef: - decl = c_ast.Typedef( - name=None, - quals=spec['qual'], - storage=spec['storage'], - type=decl, - coord=decl.coord) - else: - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=init, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] - fixed_decl = self._fix_decl_name_type(decl, typename) - - # Add the type name defined by typedef to a - # symbol table (for usage in the lexer) - # - if is_typedef: - self._add_typedef_type(fixed_decl.name) - - decls.append(fixed_decl) + decls = self._build_declarations( + spec=spec, + decls=p[2], + typedef_namespace=True) p[0] = decls @@ -522,7 +645,7 @@ # for defining typedefs. # # If a typedef line was directly followed by a line using the - # type defined with the typedef, the type would not be + # type defined with the typedef, the type would not be # recognized. This is because to reduce the declaration rule, # the parser's lookahead asked for the token after SEMI, which # was the type from the next line, and the lexer had no chance @@ -532,42 +655,41 @@ # the parser reduces decl_body, which actually adds the new # type into the table to be seen by the lexer before the next # line is reached. - # def p_declaration(self, p): - """ declaration : decl_body SEMI + """ declaration : decl_body SEMI """ p[0] = p[1] # Since each declaration is a list of declarations, this # rule will combine all the declarations and return a single # list - # + # def p_declaration_list(self, p): """ declaration_list : declaration | declaration_list declaration """ p[0] = p[1] if len(p) == 2 else p[1] + p[2] - + def p_declaration_specifiers_1(self, p): - """ declaration_specifiers : type_qualifier declaration_specifiers_opt + """ declaration_specifiers : type_qualifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') - + def p_declaration_specifiers_2(self, p): """ declaration_specifiers : type_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'type') - + def p_declaration_specifiers_3(self, p): """ declaration_specifiers : storage_class_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'storage') - + def p_declaration_specifiers_4(self, p): """ declaration_specifiers : function_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'function') - + def p_storage_class_specifier(self, p): """ storage_class_specifier : AUTO | REGISTER @@ -576,12 +698,12 @@ | TYPEDEF """ p[0] = p[1] - + def p_function_specifier(self, p): """ function_specifier : INLINE """ p[0] = p[1] - + def p_type_specifier_1(self, p): """ type_specifier : VOID | _BOOL @@ -603,34 +725,52 @@ | struct_or_union_specifier """ p[0] = p[1] - + def p_type_qualifier(self, p): """ type_qualifier : CONST | RESTRICT | VOLATILE """ p[0] = p[1] - - def p_init_declarator_list(self, p): + + def p_init_declarator_list_1(self, p): """ init_declarator_list : init_declarator | init_declarator_list COMMA init_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] - # Returns a (declarator, initializer) pair - # If there's no initializer, returns (declarator, None) + # If the code is declaring a variable that was declared a typedef in an + # outer scope, yacc will think the name is part of 
declaration_specifiers, + # not init_declarator, and will then get confused by EQUALS. Pass None + # up in place of declarator, and handle this at a higher level. + # + def p_init_declarator_list_2(self, p): + """ init_declarator_list : EQUALS initializer + """ + p[0] = [dict(decl=None, init=p[2])] + + # Similarly, if the code contains duplicate typedefs of, for example, + # array types, the array portion will appear as an abstract declarator. + # + def p_init_declarator_list_3(self, p): + """ init_declarator_list : abstract_declarator + """ + p[0] = [dict(decl=p[1], init=None)] + + # Returns a {decl= : init=} dictionary + # If there's no initializer, uses None # def p_init_declarator(self, p): """ init_declarator : declarator | declarator EQUALS initializer """ - p[0] = (p[1], p[3] if len(p) > 2 else None) - + p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None)) + def p_specifier_qualifier_list_1(self, p): """ specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') - + def p_specifier_qualifier_list_2(self, p): """ specifier_qualifier_list : type_specifier specifier_qualifier_list_opt """ @@ -645,8 +785,8 @@ """ klass = self._select_struct_union_class(p[1]) p[0] = klass( - name=p[2], - decls=None, + name=p[2], + decls=None, coord=self._coord(p.lineno(2))) def p_struct_or_union_specifier_2(self, p): @@ -669,7 +809,7 @@ coord=self._coord(p.lineno(2))) def p_struct_or_union(self, p): - """ struct_or_union : STRUCT + """ struct_or_union : STRUCT | UNION """ p[0] = p[1] @@ -686,59 +826,60 @@ """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI """ spec = p[1] - decls = [] - + assert 'typedef' not in spec['storage'] + if p[2] is not None: - for struct_decl in p[2]: - if struct_decl['decl'] is not None: - decl_coord = struct_decl['decl'].coord - else: - decl_coord = struct_decl['bitsize'].coord - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - funcspec=spec['function'], - storage=spec['storage'], - type=struct_decl['decl'], - init=None, - bitsize=struct_decl['bitsize'], - coord=decl_coord) - - typename = spec['type'] - decls.append(self._fix_decl_name_type(decl, typename)) - else: + decls = self._build_declarations( + spec=spec, + decls=p[2]) + + elif len(spec['type']) == 1: # Anonymous struct/union, gcc extension, C1x feature. - # Although the standard only allows structs/unions here, I see no + # Although the standard only allows structs/unions here, I see no # reason to disallow other types since some compilers have typedefs # here, and pycparser isn't about rejecting all invalid code. - # + # node = spec['type'][0] - if isinstance(node, c_ast.Node): decl_type = node else: decl_type = c_ast.IdentifierType(node) - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - funcspec=spec['function'], - storage=spec['storage'], - type=decl_type, - init=None, - bitsize=None, - coord=self._coord(p.lineno(3))) - decls.append(decl) - + + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=decl_type)]) + + else: + # Structure/union members can have the same names as typedefs. + # The trouble is that the member's name gets grouped into + # specifier_qualifier_list; _build_declarations compensates. + # + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)]) + p[0] = decls - + + def p_struct_declaration_2(self, p): + """ struct_declaration : specifier_qualifier_list abstract_declarator SEMI + """ + # "Abstract declarator?!", you ask? 
Structure members can have the + # same names as typedefs. The trouble is that the member's name gets + # grouped into specifier_qualifier_list, leaving any remainder to + # appear as an abstract declarator, as in: + # typedef int Foo; + # struct { Foo Foo[3]; }; + # + p[0] = self._build_declarations( + spec=p[1], + decls=[dict(decl=p[2], init=None)]) + def p_struct_declarator_list(self, p): """ struct_declarator_list : struct_declarator | struct_declarator_list COMMA struct_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] - + # struct_declarator passes up a dict with the keys: decl (for # the underlying declarator) and bitsize (for the bitsize) # @@ -746,7 +887,7 @@ """ struct_declarator : declarator """ p[0] = {'decl': p[1], 'bitsize': None} - + def p_struct_declarator_2(self, p): """ struct_declarator : declarator COLON constant_expression | COLON constant_expression @@ -755,24 +896,24 @@ p[0] = {'decl': p[1], 'bitsize': p[3]} else: p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]} - + def p_enum_specifier_1(self, p): """ enum_specifier : ENUM ID | ENUM TYPEID """ p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1))) - + def p_enum_specifier_2(self, p): """ enum_specifier : ENUM brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1))) - + def p_enum_specifier_3(self, p): """ enum_specifier : ENUM ID brace_open enumerator_list brace_close | ENUM TYPEID brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1))) - + def p_enumerator_list(self, p): """ enumerator_list : enumerator | enumerator_list COMMA @@ -791,95 +932,130 @@ | ID EQUALS constant_expression """ if len(p) == 2: - p[0] = c_ast.Enumerator( - p[1], None, + enumerator = c_ast.Enumerator( + p[1], None, self._coord(p.lineno(1))) else: - p[0] = c_ast.Enumerator( - p[1], p[3], + enumerator = c_ast.Enumerator( + p[1], p[3], self._coord(p.lineno(1))) - + self._add_identifier(enumerator.name, enumerator.coord) + + p[0] = enumerator + def p_declarator_1(self, p): - """ declarator : direct_declarator + """ declarator : direct_declarator """ p[0] = p[1] - + def p_declarator_2(self, p): - """ declarator : pointer direct_declarator + """ declarator : pointer direct_declarator """ p[0] = self._type_modify_decl(p[2], p[1]) - + + # Since it's impossible for a type to be specified after a pointer, assume + # it's intended to be the name for this declaration. _add_identifier will + # raise an error if this TYPEID can't be redeclared. 
+ # + def p_declarator_3(self, p): + """ declarator : pointer TYPEID + """ + decl = c_ast.TypeDecl( + declname=p[2], + type=None, + quals=None, + coord=self._coord(p.lineno(2))) + + p[0] = self._type_modify_decl(decl, p[1]) + def p_direct_declarator_1(self, p): - """ direct_declarator : ID + """ direct_declarator : ID """ p[0] = c_ast.TypeDecl( - declname=p[1], - type=None, + declname=p[1], + type=None, quals=None, coord=self._coord(p.lineno(1))) - + def p_direct_declarator_2(self, p): - """ direct_declarator : LPAREN declarator RPAREN + """ direct_declarator : LPAREN declarator RPAREN """ p[0] = p[2] - + def p_direct_declarator_3(self, p): - """ direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET + """ direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=p[3], coord=p[1].coord) - + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) # Special for VLAs # def p_direct_declarator_4(self, p): - """ direct_declarator : direct_declarator LBRACKET TIMES RBRACKET + """ direct_declarator : direct_declarator LBRACKET TIMES RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=c_ast.ID(p[3], self._coord(p.lineno(3))), coord=p[1].coord) - + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) def p_direct_declarator_5(self, p): - """ direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN + """ direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN | direct_declarator LPAREN identifier_list_opt RPAREN """ func = c_ast.FuncDecl( args=p[3], type=None, coord=p[1].coord) - + + # To see why _get_yacc_lookahead_token is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # Outside the function, TT is a typedef, but inside (starting and + # ending with the braces) it's a parameter. The trouble begins with + # yacc's lookahead token. We don't know if we're declaring or + # defining a function until we see LBRACE, but if we wait for yacc to + # trigger a rule on that token, then TT will have already been read + # and incorrectly interpreted as TYPEID. We need to add the + # parameters to the scope the moment the lexer sees LBRACE. 
+ # + if self._get_yacc_lookahead_token().type == "LBRACE": + if func.args is not None: + for param in func.args.params: + if isinstance(param, c_ast.EllipsisParam): break + self._add_identifier(param.name, param.coord) + p[0] = self._type_modify_decl(decl=p[1], modifier=func) - + def p_pointer(self, p): """ pointer : TIMES type_qualifier_list_opt | TIMES type_qualifier_list_opt pointer """ coord = self._coord(p.lineno(1)) - + p[0] = c_ast.PtrDecl( quals=p[2] or [], type=p[3] if len(p) > 3 else None, coord=coord) - + def p_type_qualifier_list(self, p): """ type_qualifier_list : type_qualifier | type_qualifier_list type_qualifier """ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] - + def p_parameter_type_list(self, p): """ parameter_type_list : parameter_list | parameter_list COMMA ELLIPSIS """ - if len(p) > 2: + if len(p) > 2: p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3)))) - + p[0] = p[1] def p_parameter_list(self, p): @@ -896,33 +1072,43 @@ """ parameter_declaration : declaration_specifiers declarator """ spec = p[1] - decl = p[2] - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=None, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] or ['int'] - p[0] = self._fix_decl_name_type(decl, typename) - + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))] + p[0] = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2])])[0] + def p_parameter_declaration_2(self, p): """ parameter_declaration : declaration_specifiers abstract_declarator_opt """ spec = p[1] - decl = c_ast.Typename( - quals=spec['qual'], - type=p[2] or c_ast.TypeDecl(None, None, None), - coord=self._coord(p.lineno(2))) - - typename = spec['type'] or ['int'] - p[0] = self._fix_decl_name_type(decl, typename) - + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))] + + # Parameters can have the same names as typedefs. The trouble is that + # the parameter's name gets grouped into declaration_specifiers, making + # it look like an old-style declaration; compensate. + # + if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \ + self._is_type_in_scope(spec['type'][-1].names[0]): + decl = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2], init=None)])[0] + + # This truly is an old-style parameter declaration + # + else: + decl = c_ast.Typename( + quals=spec['qual'], + type=p[2] or c_ast.TypeDecl(None, None, None), + coord=self._coord(p.lineno(2))) + typename = spec['type'] + decl = self._fix_decl_name_type(decl, typename) + + p[0] = decl + def p_identifier_list(self, p): """ identifier_list : identifier | identifier_list COMMA identifier @@ -937,7 +1123,7 @@ """ initializer : assignment_expression """ p[0] = p[1] - + def p_initializer_2(self, p): """ initializer : brace_open initializer_list brace_close | brace_open initializer_list COMMA brace_close @@ -955,12 +1141,12 @@ init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4]) p[1].exprs.append(init) p[0] = p[1] - + def p_designation(self, p): """ designation : designator_list EQUALS """ p[0] = p[1] From noreply at buildbot.pypy.org Thu Mar 6 15:59:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 15:59:28 +0100 (CET) Subject: [pypy-commit] pypy default: Oups. 
Thanks Alex Message-ID: <20140306145928.1A0821C315D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69756:0634a0579002 Date: 2014-03-06 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/0634a0579002/ Log: Oups. Thanks Alex diff --git a/lib_pypy/cffi/_pycparser/README b/lib_pypy/cffi/_pycparser/README new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/_pycparser/README @@ -0,0 +1,12 @@ +This is a copy of pycparser. See __init__.py for the version. + +Note that the following two lines have been modified in c_parser.py: + + +class CParser(PLYParser): + def __init__( + ... + lextab='cffi._pycparser.lextab', + ^^^^^^^^^^^^^^^ + yacctab='cffi._pycparser.yacctab', + ^^^^^^^^^^^^^^^ diff --git a/lib_pypy/cffi/_pycparser/c_parser.py b/lib_pypy/cffi/_pycparser/c_parser.py --- a/lib_pypy/cffi/_pycparser/c_parser.py +++ b/lib_pypy/cffi/_pycparser/c_parser.py @@ -20,9 +20,9 @@ def __init__( self, lex_optimize=True, - lextab='pycparser.lextab', + lextab='cffi._pycparser.lextab', yacc_optimize=True, - yacctab='pycparser.yacctab', + yacctab='cffi._pycparser.yacctab', yacc_debug=False): """ Create a new CParser. From noreply at buildbot.pypy.org Thu Mar 6 16:08:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 16:08:15 +0100 (CET) Subject: [pypy-commit] cffi default: Patch by Alex_Gaynor: remove usage of "Feature", which is deprecated. Message-ID: <20140306150815.202A61C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1472:5298c79cdfb5 Date: 2014-03-06 16:07 +0100 http://bitbucket.org/cffi/cffi/changeset/5298c79cdfb5/ Log: Patch by Alex_Gaynor: remove usage of "Feature", which is deprecated. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -96,7 +96,19 @@ if __name__ == '__main__': - from setuptools import setup, Feature, Extension + from setuptools import setup, Extension + ext_modules = [] + if '__pypy__' not in sys.modules: + ext_modules.append(Extension( + name='_cffi_backend', + include_dirs=include_dirs, + sources=sources, + libraries=libraries, + define_macros=define_macros, + library_dirs=library_dirs, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + )) setup( name='cffi', description='Foreign Function Interface for Python calling C code.', @@ -122,23 +134,7 @@ license='MIT', - features={ - 'cextension': Feature( - "fast c backend for cpython", - standard='__pypy__' not in sys.modules, - ext_modules=[ - Extension(name='_cffi_backend', - include_dirs=include_dirs, - sources=sources, - libraries=libraries, - define_macros=define_macros, - library_dirs=library_dirs, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - ), - ], - ), - }, + ext_modules=ext_modules, install_requires=[ 'pycparser', From noreply at buildbot.pypy.org Thu Mar 6 16:13:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 16:13:00 +0100 (CET) Subject: [pypy-commit] cffi default: Reindent this code to the more standard 4 spaces. Message-ID: <20140306151300.654F31C35CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1473:9c3051a0a194 Date: 2014-03-06 16:12 +0100 http://bitbucket.org/cffi/cffi/changeset/9c3051a0a194/ Log: Reindent this code to the more standard 4 spaces. 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -96,23 +96,23 @@ if __name__ == '__main__': - from setuptools import setup, Extension - ext_modules = [] - if '__pypy__' not in sys.modules: - ext_modules.append(Extension( - name='_cffi_backend', - include_dirs=include_dirs, - sources=sources, - libraries=libraries, - define_macros=define_macros, - library_dirs=library_dirs, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - )) - setup( - name='cffi', - description='Foreign Function Interface for Python calling C code.', - long_description=""" + from setuptools import setup, Extension + ext_modules = [] + if '__pypy__' not in sys.modules: + ext_modules.append(Extension( + name='_cffi_backend', + include_dirs=include_dirs, + sources=sources, + libraries=libraries, + define_macros=define_macros, + library_dirs=library_dirs, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + )) + setup( + name='cffi', + description='Foreign Function Interface for Python calling C code.', + long_description=""" CFFI ==== @@ -123,29 +123,29 @@ ------- `Mailing list `_ - """, - version='0.8.2', - packages=['cffi'], - zip_safe=False, +""", + version='0.8.2', + packages=['cffi'], + zip_safe=False, - url='http://cffi.readthedocs.org', - author='Armin Rigo, Maciej Fijalkowski', - author_email='python-cffi at googlegroups.com', + url='http://cffi.readthedocs.org', + author='Armin Rigo, Maciej Fijalkowski', + author_email='python-cffi at googlegroups.com', - license='MIT', + license='MIT', - ext_modules=ext_modules, + ext_modules=ext_modules, - install_requires=[ - 'pycparser', - ], - classifiers=[ - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', - ], - ) + install_requires=[ + 'pycparser', + ], + classifiers=[ + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + ], + ) From noreply at buildbot.pypy.org Thu Mar 6 17:37:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 17:37:15 +0100 (CET) Subject: [pypy-commit] stmgc default: Add stm_can_move(). Message-ID: <20140306163715.C35251C35DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r966:d0f79129cbb7 Date: 2014-03-06 17:37 +0100 http://bitbucket.org/pypy/stmgc/changeset/d0f79129cbb7/ Log: Add stm_can_move(). 
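The changeset below adds stm_can_move() to the public stmgc.h API: it returns 1 while an object is still in the nursery (and can therefore still be moved by the next minor collection), and 0 once it has survived a minor collection. As a minimal caller-side sketch, assuming only that header and treating can_cache_untraced() as a hypothetical helper, not part of the changeset:

    #include "stmgc.h"

    /* Illustrative only: decide whether it is safe to remember 'obj'
       somewhere that is not traced as a GC root.  'obj' is assumed to
       be a valid object_t* in the current transaction. */
    static int can_cache_untraced(object_t *obj)
    {
        /* stm_can_move() returns 1 while the object is still in the
           nursery (the next minor collection may move it) and 0 once
           it has been moved out, as documented in the diff below. */
        return !stm_can_move(obj);
    }

The test_can_move test added to test_nursery.py in the same changeset exercises exactly this transition: 1 right after allocation, 0 after push_root / minor collect / pop_root.
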
diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -1,4 +1,5 @@ #include +#include /************************************************************/ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -48,8 +48,9 @@ tree_contains(STM_PSEGMENT->young_outside_nursery, (uintptr_t)obj)); } -bool _stm_in_nursery(object_t *obj) +long stm_can_move(object_t *obj) { + /* 'long' return value to avoid using 'bool' in the public interface */ return _is_in_nursery(obj); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -9,7 +9,6 @@ #include #include -#include #include #include #include @@ -72,10 +71,10 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up); char *_stm_real_address(object_t *o); #ifdef STM_TESTS +#include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); uint8_t _stm_get_page_flag(uintptr_t index); -bool _stm_in_nursery(object_t *obj); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); @@ -108,8 +107,8 @@ #else #define OPT_ASSERT(cond) assert(cond) #endif -#define LIKELY(x) __builtin_expect(x, true) -#define UNLIKELY(x) __builtin_expect(x, false) +#define LIKELY(x) __builtin_expect(x, 1) +#define UNLIKELY(x) __builtin_expect(x, 0) #define IMPLY(a, b) (!(a) || (b)) @@ -269,6 +268,10 @@ long stm_id(object_t *obj); void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); +/* Returns 1 if the object can still move (it's in the nursery), or 0 + otherwise. After a minor collection no object can move any more. */ +long stm_can_move(object_t *); + /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -33,7 +33,6 @@ bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -bool _stm_in_nursery(object_t *obj); char *_stm_real_address(object_t *obj); char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); @@ -78,6 +77,8 @@ long stm_identityhash(object_t *obj); long stm_id(object_t *obj); void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); + +int stm_can_move(object_t *); """) @@ -269,7 +270,7 @@ pass def is_in_nursery(o): - return lib._stm_in_nursery(o) + return lib.stm_can_move(o) def stm_allocate_old(size): o = lib._stm_allocate_old(size) diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -184,3 +184,16 @@ stm_write(old) # old objs to trace stm_set_char(old, 'y') self.commit_transaction() + + def test_can_move(self): + self.start_transaction() + new = stm_allocate(16) + assert lib.stm_can_move(new) == 1 + self.push_root(new) + stm_minor_collect() + old = self.pop_root() + assert lib.stm_can_move(old) == 0 + self.commit_transaction() + + self.start_transaction() + assert lib.stm_can_move(old) == 0 From noreply at buildbot.pypy.org Thu Mar 6 19:16:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 19:16:14 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: A branch in which to play with a "fast GIL" version to put around Message-ID: <20140306181614.2298D1C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69757:ea951d73866a Date: 2014-03-06 19:14 +0100 http://bitbucket.org/pypy/pypy/changeset/ea951d73866a/ Log: A branch in which to play with a 
"fast GIL" version to put around the C calls done directly by jit-generated assembler. From noreply at buildbot.pypy.org Thu Mar 6 19:16:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 19:16:15 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Untested, but the idea would be to have this kind of code. Message-ID: <20140306181615.55AEC1C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69758:ff24b8da9ce6 Date: 2014-03-06 19:15 +0100 http://bitbucket.org/pypy/pypy/changeset/ff24b8da9ce6/ Log: Untested, but the idea would be to have this kind of code. diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -485,15 +485,20 @@ #ifdef HAS_ATOMIC_ADD # define atomic_add __sync_fetch_and_add #else +static inline long atomic_add(long *ptr, long value) +{ + long result; + asm volatile ( # if defined(__amd64__) -# define atomic_add(ptr, value) asm volatile ("lock addq %0, %1" \ - : : "ri"(value), "m"(*(ptr)) : "memory") + "lock xaddq %0, %1" # elif defined(__i386__) -# define atomic_add(ptr, value) asm volatile ("lock addl %0, %1" \ - : : "ri"(value), "m"(*(ptr)) : "memory") + "lock xaddl %0, %1" # else # error "Please use gcc >= 4.1 or write a custom 'asm' for your CPU." # endif + : "=r"(result) : "0"(value), "m"(*ptr) : "memory"); + return result; +} #endif #define ASSERT_STATUS(call) \ @@ -575,14 +580,100 @@ ASSERT_STATUS(pthread_cond_signal(&cond_gil)); } +#ifdef RPY_FASTGIL_VARNAME +#include + +static inline timespec_add(struct timespec *t, unsigned long long incr) +{ + unsigned long long nsec = t->tv_nsec + incr; + if (nsec >= 1000000000) { + t->tv_sec += (nsec / 1000000000); + nsec %= 1000000000; + } + t->tv_nsec = (long)nsec; +} + +static inline void _acquire_gil_or_wait_for_fastgil_to_be_one(void) +{ + /* Support for the JIT, which generates calls to external C + functions using the following very fast pattern: + + * the global variable 'RPY_FASTGIL_VARNAME' (a macro naming the + real variable) contains normally 0 + + * before doing an external C call, the generated assembler sets + it to 1 + + * afterwards, it uses an atomic instruction to decrement it, + and if it goes back to 0, everything is fine + + * otherwise, someone else (this function actually) stole the + GIL. The assembler needs to call RPyGilAcquire() again. + + This function is a balancing act inspired by CPython 2.7's + threading.py for _Condition.wait() (not the PyPy version, which + was modified). We need to wait for the real GIL to be released, + but also notice if the fast GIL contains 1. We can't afford a + pure busy loop, so we have to sleep; but if we just sleep until + the real GIL is released, we won't ever see the fast GIL being 1. + The scheme here sleeps very little at first, and longer as time + goes on. Eventually, the real GIL should be released, so there + is no point in trying to bound the maximal length of the wait. + */ + unsigned long long delay = 100000; /* in ns; initial delay is 0.1 ms */ + struct timespec t; + clock_gettime(CLOCK_REALTIME, &t); + + while (1) { + + /* try to see if we can steal the fast GIL */ + if (RPY_FASTGIL_VARNAME == 1) { + if (atomic_add(&RPY_FASTGIL_VARNAME, -1) == 1) { + /* yes, succeeded. We know that the other thread is + before the return to JITted assembler from the C + function call. The JITted assembler will definitely + call RPyGilAcquire() then. 
So we can just pretend + that the GIL --- which is still acquired --- is ours + now. + */ + return; + } + } + + /* sleep for a bit of time */ + timespec_add(&t, delay); + int error = pthread_mutex_timedlock(&mutex_gil, &t); + + if (error == ETIMEDOUT) { + delay = (delay * 3) / 2; + continue; + } + else { + ASSERT_STATUS(error); + /* succeeded in acquiring the real GIL */ + return; + } + } +} +#endif + void RPyGilAcquire(void) { _debug_print("about to RPyGilAcquire...\n"); #ifdef RPY_ASSERT assert(pending_acquires >= 0); #endif + if (pthread_mutex_trylock(&mutex_gil) == 0) { + assert_has_the_gil(); + _debug_print("got it without waiting\n"); + return; + } atomic_add(&pending_acquires, 1L); +#ifdef RPY_FASTGIL_VARNAME + _acquire_gil_or_wait_for_fastgil_to_be_zero(); +#else ASSERT_STATUS(pthread_mutex_lock(&mutex_gil)); +#endif atomic_add(&pending_acquires, -1L); assert_has_the_gil(); _debug_print("RPyGilAcquire\n"); From noreply at buildbot.pypy.org Thu Mar 6 19:58:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 6 Mar 2014 19:58:45 +0100 (CET) Subject: [pypy-commit] pypy py3k: cleanups for datetime, some from upstream Message-ID: <20140306185845.D5D0C1C3373@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r69759:f94d90d4bded Date: 2014-03-06 09:46 -0500 http://bitbucket.org/pypy/pypy/changeset/f94d90d4bded/ Log: cleanups for datetime, some from upstream diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -66,7 +66,7 @@ return _DAYS_IN_MONTH[month] def _days_before_month(year, month): - "year, month -> number of days in year preceeding first day of month." + "year, month -> number of days in year preceding first day of month." assert 1 <= month <= 12, 'month must be in 1..12' return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) @@ -316,7 +316,7 @@ raise TypeError("can't compare '%s' to '%s'" % ( type(x).__name__, type(y).__name__)) -class timedelta(object): +class timedelta: """Represent the difference between two datetime objects. Supported operators: @@ -324,7 +324,7 @@ - add, subtract timedelta - unary plus, minus, abs - compare to timedelta - - multiply, divide by int/long + - multiply, divide by int In addition, datetime supports subtraction of two datetime objects returning a timedelta, and addition or subtraction of a datetime @@ -399,21 +399,18 @@ # secondsfrac isn't referenced again if isinstance(microseconds, float): - microseconds += usdouble - microseconds = _round(microseconds) + microseconds = _round(microseconds + usdouble) seconds, microseconds = divmod(microseconds, 1000000) days, seconds = divmod(seconds, 24*3600) d += days - s += int(seconds) - microseconds = int(microseconds) + s += seconds else: microseconds = int(microseconds) seconds, microseconds = divmod(microseconds, 1000000) days, seconds = divmod(seconds, 24*3600) d += days - s += int(seconds) - microseconds += usdouble - microseconds = _round(microseconds) + s += seconds + microseconds = _round(microseconds + usdouble) assert isinstance(s, int) assert isinstance(microseconds, int) assert abs(s) <= 3 * 24 * 3600 @@ -640,7 +637,7 @@ microseconds=999999) timedelta.resolution = timedelta(microseconds=1) -class date(object): +class date: """Concrete date type. Constructors: @@ -930,7 +927,7 @@ date.max = date(9999, 12, 31) date.resolution = timedelta(days=1) -class tzinfo(object): +class tzinfo: """Abstract base class for time zone info classes. 
Subclasses must override the name(), utcoffset() and dst() methods. @@ -1000,7 +997,7 @@ _tzinfo_class = tzinfo -class time(object): +class time: """Time with time zone. Constructors: @@ -1039,7 +1036,8 @@ self = object.__new__(cls) self.__setstate(hour, minute or None) return self - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) self = object.__new__(cls) self._hour = hour @@ -1310,7 +1308,7 @@ """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) The year, month and day arguments are required. tzinfo may be None, or an - instance of a tzinfo subclass. The remaining arguments may be ints or longs. + instance of a tzinfo subclass. The remaining arguments may be ints. """ __slots__ = date.__slots__ + time.__slots__ @@ -1322,7 +1320,8 @@ self.__setstate(year, month) return self year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) self = object.__new__(cls) self._year = year @@ -1367,7 +1366,6 @@ A timezone info object may be passed in as well. """ - _check_tzinfo_arg(tz) converter = _time.localtime if tz is None else _time.gmtime @@ -1445,7 +1443,7 @@ def utctimetuple(self): "Return UTC time tuple compatible with time.gmtime()." offset = self.utcoffset() - if offset: # neither None nor 0 + if offset: self -= offset y, m, d = self.year, self.month, self.day hh, mm, ss = self.hour, self.minute, self.second @@ -1757,7 +1755,7 @@ def __setstate(self, string, tzinfo): (yhi, ylo, self._month, self._day, self._hour, - self._minute, self._second, us1, us2, us3) = string + self._minute, self._second, us1, us2, us3) = string self._year = yhi * 256 + ylo self._microsecond = (((us1 << 8) | us2) << 8) | us3 if tzinfo is None or isinstance(tzinfo, _tzinfo_class): @@ -2092,6 +2090,7 @@ perverse time zone returns a negative dst()). So a breaking case must be pretty bizarre, and a tzinfo subclass can override fromutc() if it is. """ + try: from _datetime import * except ImportError: diff --git a/lib-python/3/test/datetimetester.py b/lib-python/3/test/datetimetester.py --- a/lib-python/3/test/datetimetester.py +++ b/lib-python/3/test/datetimetester.py @@ -50,7 +50,7 @@ self.assertEqual(datetime.MAXYEAR, 9999) def test_name_cleanup(self): - if not '_Fast' in str(type(self)): + if '_Fast' not in str(self): return datetime = datetime_module names = set(name for name in dir(datetime) @@ -126,11 +126,11 @@ # carry no data), but they need to be picklable anyway else # concrete subclasses can't be pickled. orig = tzinfo.__new__(tzinfo) - self.assertTrue(type(orig) is tzinfo) + self.assertIs(type(orig), tzinfo) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) - self.assertTrue(type(derived) is tzinfo) + self.assertIs(type(derived), tzinfo) def test_pickling_subclass(self): # Make sure we can pickle/unpickle an instance of a subclass. 
@@ -246,6 +246,8 @@ self.assertEqual(timezone(-5 * HOUR), timezone(-5 * HOUR, 'EST')) with self.assertRaises(TypeError): timezone(ZERO) < timezone(ZERO) self.assertIn(timezone(ZERO), {timezone(ZERO)}) + self.assertTrue(timezone(ZERO) != None) + self.assertFalse(timezone(ZERO) == None) def test_aware_datetime(self): # test that timezone instances can be used by datetime @@ -259,7 +261,7 @@ t.replace(tzinfo=tz).dst()) ############################################################################# -# Base clase for testing a particular aspect of timedelta, time, date and +# Base class for testing a particular aspect of timedelta, time, date and # datetime comparisons. class HarmlessMixedComparison: @@ -488,9 +490,9 @@ self.assertEqual(t1, t2) self.assertTrue(t1 <= t2) self.assertTrue(t1 >= t2) - self.assertTrue(not t1 != t2) - self.assertTrue(not t1 < t2) - self.assertTrue(not t1 > t2) + self.assertFalse(t1 != t2) + self.assertFalse(t1 < t2) + self.assertFalse(t1 > t2) for args in (3, 3, 3), (2, 4, 4), (2, 3, 5): t2 = timedelta(*args) # this is larger than t1 @@ -500,12 +502,12 @@ self.assertTrue(t2 >= t1) self.assertTrue(t1 != t2) self.assertTrue(t2 != t1) - self.assertTrue(not t1 == t2) - self.assertTrue(not t2 == t1) - self.assertTrue(not t1 > t2) - self.assertTrue(not t2 < t1) - self.assertTrue(not t1 >= t2) - self.assertTrue(not t2 <= t1) + self.assertFalse(t1 == t2) + self.assertFalse(t2 == t1) + self.assertFalse(t1 > t2) + self.assertFalse(t2 < t1) + self.assertFalse(t1 >= t2) + self.assertFalse(t2 <= t1) for badarg in OTHERSTUFF: self.assertEqual(t1 == badarg, False) @@ -618,6 +620,8 @@ eq(td(milliseconds=-0.5/1000), td(microseconds=-1)) eq(td(milliseconds=0.6/1000), td(microseconds=1)) eq(td(milliseconds=-0.6/1000), td(microseconds=-1)) + eq(td(seconds=0.5/10**6), td(microseconds=1)) + eq(td(seconds=-0.5/10**6), td(microseconds=-1)) # Rounding due to contributions from more than one field. us_per_hour = 3600e6 @@ -640,7 +644,7 @@ self.assertTrue(timedelta(0, 1)) self.assertTrue(timedelta(0, 0, 1)) self.assertTrue(timedelta(microseconds=1)) - self.assertTrue(not timedelta(0)) + self.assertFalse(timedelta(0)) def test_subclass_timedelta(self): @@ -656,17 +660,17 @@ return round(sum) t1 = T(days=1) - self.assertTrue(type(t1) is T) + self.assertIs(type(t1), T) self.assertEqual(t1.as_hours(), 24) t2 = T(days=-1, seconds=-3600) - self.assertTrue(type(t2) is T) + self.assertIs(type(t2), T) self.assertEqual(t2.as_hours(), -25) t3 = t1 + t2 - self.assertTrue(type(t3) is timedelta) + self.assertIs(type(t3), timedelta) t4 = T.from_td(t3) - self.assertTrue(type(t4) is T) + self.assertIs(type(t4), T) self.assertEqual(t3.days, t4.days) self.assertEqual(t3.seconds, t4.seconds) self.assertEqual(t3.microseconds, t4.microseconds) @@ -1018,8 +1022,9 @@ # It worked or it didn't. If it didn't, assume it's reason #2, and # let the test pass if they're within half a second of each other. 
- self.assertTrue(today == todayagain or - abs(todayagain - today) < timedelta(seconds=0.5)) + if today != todayagain: + self.assertAlmostEqual(todayagain, today, + delta=timedelta(seconds=0.5)) def test_weekday(self): for i in range(7): @@ -1213,9 +1218,9 @@ self.assertEqual(t1, t2) self.assertTrue(t1 <= t2) self.assertTrue(t1 >= t2) - self.assertTrue(not t1 != t2) - self.assertTrue(not t1 < t2) - self.assertTrue(not t1 > t2) + self.assertFalse(t1 != t2) + self.assertFalse(t1 < t2) + self.assertFalse(t1 > t2) for args in (3, 3, 3), (2, 4, 4), (2, 3, 5): t2 = self.theclass(*args) # this is larger than t1 @@ -1225,12 +1230,12 @@ self.assertTrue(t2 >= t1) self.assertTrue(t1 != t2) self.assertTrue(t2 != t1) - self.assertTrue(not t1 == t2) - self.assertTrue(not t2 == t1) - self.assertTrue(not t1 > t2) - self.assertTrue(not t2 < t1) - self.assertTrue(not t1 >= t2) - self.assertTrue(not t2 <= t1) + self.assertFalse(t1 == t2) + self.assertFalse(t2 == t1) + self.assertFalse(t1 > t2) + self.assertFalse(t2 < t1) + self.assertFalse(t1 >= t2) + self.assertFalse(t2 <= t1) for badarg in OTHERSTUFF: self.assertEqual(t1 == badarg, False) @@ -1696,9 +1701,9 @@ self.assertEqual(t1, t2) self.assertTrue(t1 <= t2) self.assertTrue(t1 >= t2) - self.assertTrue(not t1 != t2) - self.assertTrue(not t1 < t2) - self.assertTrue(not t1 > t2) + self.assertFalse(t1 != t2) + self.assertFalse(t1 < t2) + self.assertFalse(t1 > t2) for i in range(len(args)): newargs = args[:] @@ -1710,12 +1715,12 @@ self.assertTrue(t2 >= t1) self.assertTrue(t1 != t2) self.assertTrue(t2 != t1) - self.assertTrue(not t1 == t2) - self.assertTrue(not t2 == t1) - self.assertTrue(not t1 > t2) - self.assertTrue(not t2 < t1) - self.assertTrue(not t1 >= t2) - self.assertTrue(not t2 <= t1) + self.assertFalse(t1 == t2) + self.assertFalse(t2 == t1) + self.assertFalse(t1 > t2) + self.assertFalse(t2 < t1) + self.assertFalse(t1 >= t2) + self.assertFalse(t2 <= t1) # A helper for timestamp constructor tests. @@ -1793,7 +1798,7 @@ if abs(from_timestamp - from_now) <= tolerance: break # Else try again a few times. 
- self.assertTrue(abs(from_timestamp - from_now) <= tolerance) + self.assertLessEqual(abs(from_timestamp - from_now), tolerance) def test_strptime(self): string = '2004-12-01 13:02:47.197' @@ -2019,9 +2024,9 @@ self.assertEqual(t1, t2) self.assertTrue(t1 <= t2) self.assertTrue(t1 >= t2) - self.assertTrue(not t1 != t2) - self.assertTrue(not t1 < t2) - self.assertTrue(not t1 > t2) + self.assertFalse(t1 != t2) + self.assertFalse(t1 < t2) + self.assertFalse(t1 > t2) for i in range(len(args)): newargs = args[:] @@ -2033,12 +2038,12 @@ self.assertTrue(t2 >= t1) self.assertTrue(t1 != t2) self.assertTrue(t2 != t1) - self.assertTrue(not t1 == t2) - self.assertTrue(not t2 == t1) - self.assertTrue(not t1 > t2) - self.assertTrue(not t2 < t1) - self.assertTrue(not t1 >= t2) - self.assertTrue(not t2 <= t1) + self.assertFalse(t1 == t2) + self.assertFalse(t2 == t1) + self.assertFalse(t1 > t2) + self.assertFalse(t2 < t1) + self.assertFalse(t1 >= t2) + self.assertFalse(t2 <= t1) for badarg in OTHERSTUFF: self.assertEqual(t1 == badarg, False) @@ -2216,8 +2221,8 @@ self.assertTrue(cls(0, 1)) self.assertTrue(cls(0, 0, 1)) self.assertTrue(cls(0, 0, 0, 1)) - self.assertTrue(not cls(0)) - self.assertTrue(not cls()) + self.assertFalse(cls(0)) + self.assertFalse(cls()) def test_replace(self): cls = self.theclass @@ -2317,7 +2322,7 @@ def utcoffset(self, dt): pass b = BetterTry() t = cls(1, 1, 1, tzinfo=b) - self.assertTrue(t.tzinfo is b) + self.assertIs(t.tzinfo, b) def test_utc_offset_out_of_bounds(self): class Edgy(tzinfo): @@ -2356,9 +2361,9 @@ for t in (cls(1, 1, 1), cls(1, 1, 1, tzinfo=None), cls(1, 1, 1, tzinfo=C1())): - self.assertTrue(t.utcoffset() is None) - self.assertTrue(t.dst() is None) - self.assertTrue(t.tzname() is None) + self.assertIsNone(t.utcoffset()) + self.assertIsNone(t.dst()) + self.assertIsNone(t.tzname()) class C3(tzinfo): def utcoffset(self, dt): return timedelta(minutes=-1439) @@ -2453,7 +2458,7 @@ self.assertEqual(t.minute, 0) self.assertEqual(t.second, 0) self.assertEqual(t.microsecond, 0) - self.assertTrue(t.tzinfo is None) + self.assertIsNone(t.tzinfo) def test_zones(self): est = FixedOffset(-300, "EST", 1) @@ -2468,25 +2473,25 @@ self.assertEqual(t1.tzinfo, est) self.assertEqual(t2.tzinfo, utc) self.assertEqual(t3.tzinfo, met) - self.assertTrue(t4.tzinfo is None) + self.assertIsNone(t4.tzinfo) self.assertEqual(t5.tzinfo, utc) self.assertEqual(t1.utcoffset(), timedelta(minutes=-300)) self.assertEqual(t2.utcoffset(), timedelta(minutes=0)) self.assertEqual(t3.utcoffset(), timedelta(minutes=60)) - self.assertTrue(t4.utcoffset() is None) + self.assertIsNone(t4.utcoffset()) self.assertRaises(TypeError, t1.utcoffset, "no args") self.assertEqual(t1.tzname(), "EST") self.assertEqual(t2.tzname(), "UTC") self.assertEqual(t3.tzname(), "MET") - self.assertTrue(t4.tzname() is None) + self.assertIsNone(t4.tzname()) self.assertRaises(TypeError, t1.tzname, "no args") self.assertEqual(t1.dst(), timedelta(minutes=1)) self.assertEqual(t2.dst(), timedelta(minutes=-2)) self.assertEqual(t3.dst(), timedelta(minutes=3)) - self.assertTrue(t4.dst() is None) + self.assertIsNone(t4.dst()) self.assertRaises(TypeError, t1.dst, "no args") self.assertEqual(hash(t1), hash(t2)) @@ -2538,7 +2543,7 @@ self.assertRaises(TypeError, t.strftime, "%Z") # Issue #6697: - if '_Fast' in str(type(self)): + if '_Fast' in str(self): Badtzname.tz = '\ud800' self.assertRaises(ValueError, t.strftime, "%Z") @@ -2583,10 +2588,10 @@ self.assertTrue(t) t = cls(5, tzinfo=FixedOffset(300, "")) - self.assertTrue(not t) + self.assertFalse(t) 
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, "")) - self.assertTrue(not t) + self.assertFalse(t) # Mostly ensuring this doesn't overflow internally. t = cls(0, tzinfo=FixedOffset(23*60 + 59, "")) @@ -2624,13 +2629,13 @@ # Ensure we can get rid of a tzinfo. self.assertEqual(base.tzname(), "+100") base2 = base.replace(tzinfo=None) - self.assertTrue(base2.tzinfo is None) - self.assertTrue(base2.tzname() is None) + self.assertIsNone(base2.tzinfo) + self.assertIsNone(base2.tzname()) # Ensure we can add one. base3 = base2.replace(tzinfo=z100) self.assertEqual(base, base3) - self.assertTrue(base.tzinfo is base3.tzinfo) + self.assertIs(base.tzinfo, base3.tzinfo) # Out of bounds. base = cls(1) @@ -2865,7 +2870,7 @@ tz55 = FixedOffset(-330, "west 5:30") timeaware = now.time().replace(tzinfo=tz55) nowaware = self.theclass.combine(now.date(), timeaware) - self.assertTrue(nowaware.tzinfo is tz55) + self.assertIs(nowaware.tzinfo, tz55) self.assertEqual(nowaware.timetz(), timeaware) # Can't mix aware and non-aware. @@ -2884,15 +2889,15 @@ # Adding a delta should preserve tzinfo. delta = timedelta(weeks=1, minutes=12, microseconds=5678) nowawareplus = nowaware + delta - self.assertTrue(nowaware.tzinfo is tz55) + self.assertIs(nowaware.tzinfo, tz55) nowawareplus2 = delta + nowaware - self.assertTrue(nowawareplus2.tzinfo is tz55) + self.assertIs(nowawareplus2.tzinfo, tz55) self.assertEqual(nowawareplus, nowawareplus2) # that - delta should be what we started with, and that - what we # started with should be delta. diff = nowawareplus - delta - self.assertTrue(diff.tzinfo is tz55) + self.assertIs(diff.tzinfo, tz55) self.assertEqual(nowaware, diff) self.assertRaises(TypeError, lambda: delta - nowawareplus) self.assertEqual(nowawareplus - nowaware, delta) @@ -2901,7 +2906,7 @@ tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone") # Attach it to nowawareplus. nowawareplus = nowawareplus.replace(tzinfo=tzr) - self.assertTrue(nowawareplus.tzinfo is tzr) + self.assertIs(nowawareplus.tzinfo, tzr) # Make sure the difference takes the timezone adjustments into account. got = nowaware - nowawareplus # Expected: (nowaware base - nowaware offset) - @@ -2933,7 +2938,7 @@ off42 = FixedOffset(42, "42") another = meth(off42) again = meth(tz=off42) - self.assertTrue(another.tzinfo is again.tzinfo) + self.assertIs(another.tzinfo, again.tzinfo) self.assertEqual(another.utcoffset(), timedelta(minutes=42)) # Bad argument with and w/o naming the keyword. self.assertRaises(TypeError, meth, 16) @@ -2951,7 +2956,7 @@ timezone(timedelta(hours=15, minutes=58), "weirdtz"),]: for dummy in range(3): now = datetime.now(weirdtz) - self.assertTrue(now.tzinfo is weirdtz) + self.assertIs(now.tzinfo, weirdtz) utcnow = datetime.utcnow().replace(tzinfo=utc) now2 = utcnow.astimezone(weirdtz) if abs(now - now2) < timedelta(seconds=30): @@ -2972,7 +2977,7 @@ off42 = FixedOffset(42, "42") another = meth(ts, off42) again = meth(ts, tz=off42) - self.assertTrue(another.tzinfo is again.tzinfo) + self.assertIs(another.tzinfo, again.tzinfo) self.assertEqual(another.utcoffset(), timedelta(minutes=42)) # Bad argument with and w/o naming the keyword. self.assertRaises(TypeError, meth, ts, 16) @@ -3183,13 +3188,13 @@ # Ensure we can get rid of a tzinfo. self.assertEqual(base.tzname(), "+100") base2 = base.replace(tzinfo=None) - self.assertTrue(base2.tzinfo is None) - self.assertTrue(base2.tzname() is None) + self.assertIsNone(base2.tzinfo) + self.assertIsNone(base2.tzname()) # Ensure we can add one. 
base3 = base2.replace(tzinfo=z100) self.assertEqual(base, base3) - self.assertTrue(base.tzinfo is base3.tzinfo) + self.assertIs(base.tzinfo, base3.tzinfo) # Out of bounds. base = cls(2000, 2, 29) @@ -3202,20 +3207,20 @@ fm5h = FixedOffset(-timedelta(hours=5), "m300") dt = self.theclass.now(tz=f44m) - self.assertTrue(dt.tzinfo is f44m) + self.assertIs(dt.tzinfo, f44m) # Replacing with degenerate tzinfo raises an exception. self.assertRaises(ValueError, dt.astimezone, fnone) # Ditto with None tz. self.assertRaises(TypeError, dt.astimezone, None) # Replacing with same tzinfo makes no change. x = dt.astimezone(dt.tzinfo) - self.assertTrue(x.tzinfo is f44m) + self.assertIs(x.tzinfo, f44m) self.assertEqual(x.date(), dt.date()) self.assertEqual(x.time(), dt.time()) # Replacing with different tzinfo does adjust. got = dt.astimezone(fm5h) - self.assertTrue(got.tzinfo is fm5h) + self.assertIs(got.tzinfo, fm5h) self.assertEqual(got.utcoffset(), timedelta(hours=-5)) expected = dt - dt.utcoffset() # in effect, convert to UTC expected += fm5h.utcoffset(dt) # and from there to local time @@ -3223,7 +3228,7 @@ self.assertEqual(got.date(), expected.date()) self.assertEqual(got.time(), expected.time()) self.assertEqual(got.timetz(), expected.timetz()) - self.assertTrue(got.tzinfo is expected.tzinfo) + self.assertIs(got.tzinfo, expected.tzinfo) self.assertEqual(got, expected) def test_aware_subtract(self): @@ -3665,8 +3670,8 @@ as_datetime = datetime.combine(as_date, time()) self.assertTrue(as_date != as_datetime) self.assertTrue(as_datetime != as_date) - self.assertTrue(not as_date == as_datetime) - self.assertTrue(not as_datetime == as_date) + self.assertFalse(as_date == as_datetime) + self.assertFalse(as_datetime == as_date) self.assertRaises(TypeError, lambda: as_date < as_datetime) self.assertRaises(TypeError, lambda: as_datetime < as_date) self.assertRaises(TypeError, lambda: as_date <= as_datetime) From noreply at buildbot.pypy.org Thu Mar 6 20:25:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 20:25:24 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Theoretical improvements Message-ID: <20140306192524.132C61C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69760:d548518bdb3f Date: 2014-03-06 20:24 +0100 http://bitbucket.org/pypy/pypy/changeset/d548518bdb3f/ Log: Theoretical improvements diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -485,20 +485,15 @@ #ifdef HAS_ATOMIC_ADD # define atomic_add __sync_fetch_and_add #else -static inline long atomic_add(long *ptr, long value) -{ - long result; - asm volatile ( # if defined(__amd64__) - "lock xaddq %0, %1" +# define atomic_add(ptr, value) asm volatile ("lock addq %0, %1" \ + : : "ri"(value), "m"(*(ptr)) : "memory") # elif defined(__i386__) - "lock xaddl %0, %1" +# define atomic_add(ptr, value) asm volatile ("lock addl %0, %1" \ + : : "ri"(value), "m"(*(ptr)) : "memory") # else # error "Please use gcc >= 4.1 or write a custom 'asm' for your CPU." 
# endif - : "=r"(result) : "0"(value), "m"(*ptr) : "memory"); - return result; -} #endif #define ASSERT_STATUS(call) \ @@ -583,6 +578,21 @@ #ifdef RPY_FASTGIL_VARNAME #include +static inline void *atomic_xchg(void **ptr, void *value) +{ + void *result; + asm volatile ( +#if defined(__amd64__) + "xchgq %0, %1 /* automatically locked */" +#elif defined(__i386__) + "xchgl %0, %1 /* automatically locked */" +#else +# error "RPY_FASTGIL_VARNAME: only for x86 right now" +#endif + : "r"(result) : "0"(value), "m"(*ptr) : "memory"); + return result; +} + static inline timespec_add(struct timespec *t, unsigned long long incr) { unsigned long long nsec = t->tv_nsec + incr; @@ -593,7 +603,7 @@ t->tv_nsec = (long)nsec; } -static inline void _acquire_gil_or_wait_for_fastgil_to_be_one(void) +static inline void _acquire_gil_or_wait_for_fastgil_to_be_nonzero(void) { /* Support for the JIT, which generates calls to external C functions using the following very fast pattern: @@ -602,14 +612,27 @@ real variable) contains normally 0 * before doing an external C call, the generated assembler sets - it to 1 + this global variable to an in-stack pointer to its + ASM_FRAMEDATA_HEAD structure (for asmgcc) or to 1 (for + shadowstack, when implemented) - * afterwards, it uses an atomic instruction to decrement it, - and if it goes back to 0, everything is fine + * afterwards, it uses an atomic instruction to get the current + value stored in the variable and to replace it with zero - * otherwise, someone else (this function actually) stole the - GIL. The assembler needs to call RPyGilAcquire() again. - + * if the old value was still the ASM_FRAMEDATA_HEAD pointer of + this thread, everything is fine + + * otherwise, someone else stole the GIL. The assembler calls a + helper. This helper first needs to unlink this thread's + ASM_FRAMEDATA_HEAD from the chained list where it was put by + the stealing code. If the old value was zero, it means that + the stealing code was this function here. In that case, the + helper needs to call RPyGilAcquire() again. If, on the other + hand, the old value is another ASM_FRAMEDATA_HEAD from a + different thread, it means we just stole the fast GIL from this + other thread. In that case we store that different + ASM_FRAMEDATA_HEAD into the chained list and return immediately. + This function is a balancing act inspired by CPython 2.7's threading.py for _Condition.wait() (not the PyPy version, which was modified). We need to wait for the real GIL to be released, @@ -627,17 +650,25 @@ while (1) { /* try to see if we can steal the fast GIL */ - if (RPY_FASTGIL_VARNAME == 1) { - if (atomic_add(&RPY_FASTGIL_VARNAME, -1) == 1) { - /* yes, succeeded. We know that the other thread is - before the return to JITted assembler from the C - function call. The JITted assembler will definitely - call RPyGilAcquire() then. So we can just pretend - that the GIL --- which is still acquired --- is ours - now. - */ - return; - } + void *fastgilvalue; + fastgilvalue = atomic_xchg(&RPY_FASTGIL_VARNAME, NULL); + if (fastgilvalue != NULL) { + /* yes, succeeded. We know that the other thread is before + the return to JITted assembler from the C function call. + The JITted assembler will definitely call RPyGilAcquire() + then. So we can just pretend that the GIL --- which is + still acquired --- is ours now. We only need to fix + the asmgcc linked list. 
+ */ + struct pypy_ASM_FRAMEDATA_HEAD0 *new = + (struct pypy_ASM_FRAMEDATA_HEAD0 *)fastgilvalue; + struct pypy_ASM_FRAMEDATA_HEAD0 *root = &pypy_g_ASM_FRAMEDATA_HEAD; + struct pypy_ASM_FRAMEDATA_HEAD0 *next = root->as_next; + new->as_next = next; + new->as_prev = root; + root->as_next = new; + next->as_prev = new; + return; } /* sleep for a bit of time */ @@ -670,7 +701,7 @@ } atomic_add(&pending_acquires, 1L); #ifdef RPY_FASTGIL_VARNAME - _acquire_gil_or_wait_for_fastgil_to_be_zero(); + _acquire_gil_or_wait_for_fastgil_to_be_nonzero(); #else ASSERT_STATUS(pthread_mutex_lock(&mutex_gil)); #endif From noreply at buildbot.pypy.org Thu Mar 6 20:32:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Mar 2014 20:32:28 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Move clock_gettime() below Message-ID: <20140306193228.3B5771C3373@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69761:47ef34529770 Date: 2014-03-06 20:31 +0100 http://bitbucket.org/pypy/pypy/changeset/47ef34529770/ Log: Move clock_gettime() below diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -643,9 +643,8 @@ goes on. Eventually, the real GIL should be released, so there is no point in trying to bound the maximal length of the wait. */ - unsigned long long delay = 100000; /* in ns; initial delay is 0.1 ms */ + unsigned long long delay = 0; struct timespec t; - clock_gettime(CLOCK_REALTIME, &t); while (1) { @@ -672,6 +671,10 @@ } /* sleep for a bit of time */ + if (delay == 0) { + clock_gettime(CLOCK_REALTIME, &t); + delay = 100000; /* in ns; initial delay is 0.1 ms */ + } timespec_add(&t, delay); int error = pthread_mutex_timedlock(&mutex_gil, &t); From noreply at buildbot.pypy.org Thu Mar 6 21:21:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 6 Mar 2014 21:21:32 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups for datetime Message-ID: <20140306202132.4B9DE1C35CC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69762:3f9e48c7b04c Date: 2014-03-06 14:41 -0500 http://bitbucket.org/pypy/pypy/changeset/3f9e48c7b04c/ Log: cleanups for datetime diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -66,7 +66,7 @@ return _DAYS_IN_MONTH[month] def _days_before_month(year, month): - "year, month -> number of days in year preceeding first day of month." + "year, month -> number of days in year preceding first day of month." 
assert 1 <= month <= 12, 'month must be in 1..12' return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) @@ -251,7 +251,7 @@ def _check_utc_offset(name, offset): assert name in ("utcoffset", "dst") if offset is None: - return None + return if not isinstance(offset, timedelta): raise TypeError("tzinfo.%s() must return None " "or timedelta, not '%s'" % (name, type(offset))) @@ -497,8 +497,7 @@ # secondsfrac isn't referenced again if isinstance(microseconds, float): - microseconds += usdouble - microseconds = _round(microseconds) + microseconds = _round(microseconds + usdouble) seconds, microseconds = divmod(microseconds, 1000000) days, seconds = divmod(seconds, 24*3600) d += days @@ -510,8 +509,7 @@ days, seconds = divmod(seconds, 24*3600) d += days s += int(seconds) - microseconds += usdouble - microseconds = _round(microseconds) + microseconds = _round(microseconds + usdouble) assert isinstance(s, int) assert isinstance(microseconds, int) assert abs(s) <= 3 * 24 * 3600 @@ -1140,7 +1138,8 @@ self = object.__new__(cls) self.__setstate(hour, minute or None) return self - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) self = object.__new__(cls) self._hour = hour @@ -1444,7 +1443,8 @@ self.__setstate(year, month) return self year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) self = object.__new__(cls) self._year = year diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py --- a/pypy/module/test_lib_pypy/test_datetime.py +++ b/pypy/module/test_lib_pypy/test_datetime.py @@ -17,6 +17,14 @@ datetime.tzinfo()]: raises(AttributeError, 'x.abc = 1') +def test_timedelta_init_long(): + td = datetime.timedelta(microseconds=20000000000000000000) + assert td.days == 231481481 + assert td.seconds == 41600 + td = datetime.timedelta(microseconds=20000000000000000000.) 
+ assert td.days == 231481481 + assert td.seconds == 41600 + def test_unpickle(): e = raises(TypeError, datetime.date, '123') assert e.value.args[0] == 'an integer is required' From noreply at buildbot.pypy.org Thu Mar 6 23:44:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 23:44:35 +0100 (CET) Subject: [pypy-commit] pypy py3k: oops, fix Message-ID: <20140306224435.287FD1C3373@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69763:f2bf50e43d48 Date: 2014-03-06 14:41 -0800 http://bitbucket.org/pypy/pypy/changeset/f2bf50e43d48/ Log: oops, fix diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -263,11 +263,7 @@ try: self.sock.bind(self.sockaddress) break - except OperationError, e: # should get a "Permission denied" - if not e.match(space, space.getattr(w_socketmod, space.wrap("error"))): - raise - print(e.errorstr(space)) - except cls.w_sock_err, e: # should get a "Permission denied" + except socket.error as e: # should get a "Permission denied" print(e) else: raise(e) From noreply at buildbot.pypy.org Thu Mar 6 23:44:36 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 23:44:36 +0100 (CET) Subject: [pypy-commit] pypy py3k: restore these which we still rely on Message-ID: <20140306224436.929181C3373@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69764:83b9a24138f1 Date: 2014-03-06 14:42 -0800 http://bitbucket.org/pypy/pypy/changeset/83b9a24138f1/ Log: restore these which we still rely on diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1275,6 +1275,18 @@ else: return index + def getslice(space, w_obj, w_start, w_stop): + w_slice = space.newslice(w_start, w_stop, space.w_None) + return space.getitem(w_obj, w_slice) + + def setslice(space, w_obj, w_start, w_stop, w_sequence): + w_slice = space.newslice(w_start, w_stop, space.w_None) + return space.setitem(w_obj, w_slice, w_sequence) + + def delslice(space, w_obj, w_start, w_stop): + w_slice = space.newslice(w_start, w_stop, space.w_None) + return space.delitem(w_obj, w_slice) + def r_longlong_w(self, w_obj, allow_conversion=True): bigint = self.bigint_w(w_obj, allow_conversion) try: From noreply at buildbot.pypy.org Thu Mar 6 23:44:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 23:44:37 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3 which has an extra encoding step Message-ID: <20140306224437.C50F11C3373@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69765:8010fe669223 Date: 2014-03-06 14:42 -0800 http://bitbucket.org/pypy/pypy/changeset/8010fe669223/ Log: adapt to py3 which has an extra encoding step diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -118,9 +118,9 @@ '\t: LOAD_CONST 1 (None)\n' '\t: RETURN_VALUE 0 \n' '>>>> ') in output + # '5\n' --- this line sent to stderr assert ('\t: LOAD_NAME 0 (x)\n' - '\t: PRINT_EXPR 0 \n' - # '5\n' --- this line sent to stderr - '\t: LOAD_CONST 0 (None)\n' + '\t: PRINT_EXPR 0 \n') in output + assert ('\t: LOAD_CONST 0 (None)\n' '\t: RETURN_VALUE 0 \n' '>>>> ') in output From noreply at buildbot.pypy.org Thu Mar 6 23:44:38 2014 From: noreply at buildbot.pypy.org (pjenvey) 
Date: Thu, 6 Mar 2014 23:44:38 +0100 (CET) Subject: [pypy-commit] pypy py3k: buffer -> memoryview Message-ID: <20140306224438.EDCA21C3373@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69766:df1433032bf6 Date: 2014-03-06 14:42 -0800 http://bitbucket.org/pypy/pypy/changeset/df1433032bf6/ Log: buffer -> memoryview diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -67,7 +67,7 @@ def __int__(self): return self.x - buf = buffer('hello world') + buf = memoryview(b'hello world') raises(TypeError, "buf[MyInt(0)]") raises(TypeError, "buf[MyInt(0):MyInt(5)]") From noreply at buildbot.pypy.org Thu Mar 6 23:44:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 6 Mar 2014 23:44:40 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140306224440.5FD621C3373@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69767:99281ed7ef8e Date: 2014-03-06 14:43 -0800 http://bitbucket.org/pypy/pypy/changeset/99281ed7ef8e/ Log: merge default diff too long, truncating to 2000 out of 3097 lines diff --git a/lib_pypy/cffi/_pycparser/README b/lib_pypy/cffi/_pycparser/README new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/_pycparser/README @@ -0,0 +1,12 @@ +This is a copy of pycparser. See __init__.py for the version. + +Note that the following two lines have been modified in c_parser.py: + + +class CParser(PLYParser): + def __init__( + ... + lextab='cffi._pycparser.lextab', + ^^^^^^^^^^^^^^^ + yacctab='cffi._pycparser.yacctab', + ^^^^^^^^^^^^^^^ diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py --- a/lib_pypy/cffi/_pycparser/__init__.py +++ b/lib_pypy/cffi/_pycparser/__init__.py @@ -1,14 +1,14 @@ #----------------------------------------------------------------- # pycparser: __init__.py # -# This package file exports some convenience functions for +# This package file exports some convenience functions for # interacting with pycparser # # Copyright (C) 2008-2012, Eli Bendersky # License: BSD #----------------------------------------------------------------- __all__ = ['c_lexer', 'c_parser', 'c_ast'] -__version__ = '2.09.1' +__version__ = '2.10' from subprocess import Popen, PIPE from .c_parser import CParser @@ -26,12 +26,12 @@ arguments. When successful, returns the preprocessed file's contents. - Errors from cpp will be printed out. + Errors from cpp will be printed out. """ path_list = [cpp_path] if isinstance(cpp_args, list): path_list += cpp_args - elif cpp_args != '': + elif cpp_args != '': path_list += [cpp_args] path_list += [filename] @@ -39,8 +39,8 @@ # Note the use of universal_newlines to treat all newlines # as \n for Python's purpose # - pipe = Popen( path_list, - stdout=PIPE, + pipe = Popen( path_list, + stdout=PIPE, universal_newlines=True) text = pipe.communicate()[0] except OSError as e: @@ -77,10 +77,10 @@ parser: Optional parser object to be used instead of the default CParser - When successful, an AST is returned. ParseError can be + When successful, an AST is returned. ParseError can be thrown if the file doesn't parse successfully. - Errors from cpp will be printed out. + Errors from cpp will be printed out. 
""" if use_cpp: text = preprocess_file(filename, cpp_path, cpp_args) diff --git a/lib_pypy/cffi/_pycparser/_build_tables.py b/lib_pypy/cffi/_pycparser/_build_tables.py --- a/lib_pypy/cffi/_pycparser/_build_tables.py +++ b/lib_pypy/cffi/_pycparser/_build_tables.py @@ -1,7 +1,7 @@ #----------------------------------------------------------------- # pycparser: _build_tables.py # -# A dummy for generating the lexing/parsing tables and and +# A dummy for generating the lexing/parsing tables and and # compiling them into .pyc for faster execution in optimized mode. # Also generates AST code from the configuration file. # Should be called from the pycparser directory. @@ -17,14 +17,14 @@ ast_gen.generate(open('c_ast.py', 'w')) import sys -sys.path.extend(['.', '..']) +sys.path[0:0] = ['.', '..'] from pycparser import c_parser # Generates the tables # c_parser.CParser( - lex_optimize=True, - yacc_debug=False, + lex_optimize=True, + yacc_debug=False, yacc_optimize=True) # Load to compile into .pyc diff --git a/lib_pypy/cffi/_pycparser/_c_ast.cfg b/lib_pypy/cffi/_pycparser/_c_ast.cfg --- a/lib_pypy/cffi/_pycparser/_c_ast.cfg +++ b/lib_pypy/cffi/_pycparser/_c_ast.cfg @@ -29,7 +29,7 @@ Cast: [to_type*, expr*] -# Compound statement in C99 is a list of block items (declarations or +# Compound statement in C99 is a list of block items (declarations or # statements). # Compound: [block_items**] @@ -37,7 +37,7 @@ # Compound literal (anonymous aggregate) for C99. # (type-name) {initializer_list} # type: the typename -# init: InitExprList for the initializer list +# init: InitList for the initializer list # CompoundLiteral: [type*, init*] diff --git a/lib_pypy/cffi/_pycparser/c_generator.py b/lib_pypy/cffi/_pycparser/c_generator.py --- a/lib_pypy/cffi/_pycparser/c_generator.py +++ b/lib_pypy/cffi/_pycparser/c_generator.py @@ -11,34 +11,34 @@ class CGenerator(object): """ Uses the same visitor pattern as c_ast.NodeVisitor, but modified to - return a value from each visit method, using string accumulation in + return a value from each visit method, using string accumulation in generic_visit. """ def __init__(self): self.output = '' - + # Statements start with indentation of self.indent_level spaces, using # the _make_indent method # self.indent_level = 0 - + def _make_indent(self): return ' ' * self.indent_level - + def visit(self, node): method = 'visit_' + node.__class__.__name__ return getattr(self, method, self.generic_visit)(node) - + def generic_visit(self, node): #~ print('generic:', type(node)) if node is None: return '' else: return ''.join(self.visit(c) for c in node.children()) - + def visit_Constant(self, n): return n.value - + def visit_ID(self, n): return n.name @@ -61,22 +61,22 @@ elif n.op == 'p--': return '%s--' % operand elif n.op == 'sizeof': - # Always parenthesize the argument of sizeof since it can be + # Always parenthesize the argument of sizeof since it can be # a name. 
return 'sizeof(%s)' % self.visit(n.expr) else: return '%s%s' % (n.op, operand) def visit_BinaryOp(self, n): - lval_str = self._parenthesize_if(n.left, + lval_str = self._parenthesize_if(n.left, lambda d: not self._is_simple_node(d)) - rval_str = self._parenthesize_if(n.right, + rval_str = self._parenthesize_if(n.right, lambda d: not self._is_simple_node(d)) return '%s %s %s' % (lval_str, n.op, rval_str) def visit_Assignment(self, n): rval_str = self._parenthesize_if( - n.rvalue, + n.rvalue, lambda n: isinstance(n, c_ast.Assignment)) return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str) @@ -101,7 +101,7 @@ def visit_DeclList(self, n): s = self.visit(n.decls[0]) if len(n.decls) > 1: - s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) + s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) for decl in n.decls[1:]) return s @@ -112,7 +112,7 @@ return s def visit_Cast(self, n): - s = '(' + self._generate_type(n.to_type) + ')' + s = '(' + self._generate_type(n.to_type) + ')' return s + ' ' + self._parenthesize_unless_simple(n.expr) def visit_ExprList(self, n): @@ -127,8 +127,10 @@ def visit_InitList(self, n): visited_subexprs = [] for expr in n.exprs: - if isinstance(expr, c_ast.InitList): + if isinstance(expr, c_ast.ExprList): visited_subexprs.append('(' + self.visit(expr) + ')') + elif isinstance(expr, c_ast.InitList): + visited_subexprs.append('{' + self.visit(expr) + '}') else: visited_subexprs.append(self.visit(expr)) return ', '.join(visited_subexprs) @@ -140,9 +142,9 @@ s += ' {' for i, enumerator in enumerate(n.values.enumerators): s += enumerator.name - if enumerator.value: + if enumerator.value: s += ' = ' + self.visit(enumerator.value) - if i != len(n.values.enumerators) - 1: + if i != len(n.values.enumerators) - 1: s += ', ' s += '}' return s @@ -203,7 +205,7 @@ if n.cond: s += self.visit(n.cond) s += ')\n' s += self._generate_stmt(n.iftrue, add_indent=True) - if n.iffalse: + if n.iffalse: s += self._make_indent() + 'else\n' s += self._generate_stmt(n.iffalse, add_indent=True) return s @@ -265,7 +267,7 @@ def visit_Typename(self, n): return self._generate_type(n.type) - + def visit_Union(self, n): return self._generate_struct_union(n, 'union') @@ -280,13 +282,13 @@ return s def _generate_struct_union(self, n, name): - """ Generates code for structs and unions. name should be either + """ Generates code for structs and unions. name should be either 'struct' or union. """ s = name + ' ' + (n.name or '') if n.decls: s += '\n' - s += self._make_indent() + s += self._make_indent() self.indent_level += 2 s += '{\n' for decl in n.decls: @@ -297,25 +299,26 @@ def _generate_stmt(self, n, add_indent=False): """ Generation from a statement node. This method exists as a wrapper - for individual visit_* methods to handle different treatment of + for individual visit_* methods to handle different treatment of some statements in this context. 
""" typ = type(n) if add_indent: self.indent_level += 2 indent = self._make_indent() if add_indent: self.indent_level -= 2 - - if typ in ( + + if typ in ( c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp, c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef, - c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef): + c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef, + c_ast.ExprList): # These can also appear in an expression context so no semicolon # is added to them automatically # return indent + self.visit(n) + ';\n' elif typ in (c_ast.Compound,): - # No extra indentation required before the opening brace of a - # compound - because it consists of multiple lines it has to + # No extra indentation required before the opening brace of a + # compound - because it consists of multiple lines it has to # compute its own indentation. # return self.visit(n) @@ -330,21 +333,21 @@ if n.storage: s += ' '.join(n.storage) + ' ' s += self._generate_type(n.type) return s - + def _generate_type(self, n, modifiers=[]): - """ Recursive generation from a type node. n is the type node. - modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers + """ Recursive generation from a type node. n is the type node. + modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers encountered on the way down to a TypeDecl, to allow proper generation from it. """ typ = type(n) #~ print(n, modifiers) - + if typ == c_ast.TypeDecl: s = '' if n.quals: s += ' '.join(n.quals) + ' ' s += self.visit(n.type) - + nstr = n.declname if n.declname else '' # Resolve modifiers. # Wrap in parens to distinguish pointer to array and pointer to @@ -396,7 +399,7 @@ """ Returns True for nodes that are "simple" - i.e. nodes that always have higher precedence than operators. """ - return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef, + return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef, c_ast.StructRef, c_ast.FuncCall)) diff --git a/lib_pypy/cffi/_pycparser/c_lexer.py b/lib_pypy/cffi/_pycparser/c_lexer.py --- a/lib_pypy/cffi/_pycparser/c_lexer.py +++ b/lib_pypy/cffi/_pycparser/c_lexer.py @@ -1,11 +1,11 @@ +#------------------------------------------------------------------------------ # pycparser: c_lexer.py # # CLexer class: lexer for the C language # -# Copyright (C) 2008-2011, Eli Bendersky +# Copyright (C) 2008-2013, Eli Bendersky # License: BSD -#----------------------------------------------------------------- - +#------------------------------------------------------------------------------ import re import sys @@ -15,41 +15,50 @@ class CLexer(object): """ A lexer for the C language. After building it, set the - input text with input(), and call token() to get new + input text with input(), and call token() to get new tokens. - + The public attribute filename can be set to an initial - filaneme, but the lexer will update it upon #line + filaneme, but the lexer will update it upon #line directives. """ - def __init__(self, error_func, type_lookup_func): + def __init__(self, error_func, on_lbrace_func, on_rbrace_func, + type_lookup_func): """ Create a new Lexer. - + error_func: An error function. Will be called with an error - message, line and column as arguments, in case of + message, line and column as arguments, in case of an error during lexing. - + + on_lbrace_func, on_rbrace_func: + Called when an LBRACE or RBRACE is encountered + (likely to push/pop type_lookup_func's scope) + type_lookup_func: A type lookup function. 
Given a string, it must return True IFF this string is a name of a type that was defined with a typedef earlier. """ self.error_func = error_func + self.on_lbrace_func = on_lbrace_func + self.on_rbrace_func = on_rbrace_func self.type_lookup_func = type_lookup_func self.filename = '' - + + # Keeps track of the last token returned from self.token() + self.last_token = None + # Allow either "# line" or "# " to support GCC's # cpp output # self.line_pattern = re.compile('([ \t]*line\W)|([ \t]*\d+)') - self.pragma_pattern = re.compile('[ \t]*pragma\W') def build(self, **kwargs): """ Builds the lexer from the specification. Must be - called after the lexer object is created. - + called after the lexer object is created. + This method exists separately, because the PLY manual warns against calling lex.lex inside __init__ @@ -63,10 +72,10 @@ def input(self, text): self.lexer.input(text) - + def token(self): - g = self.lexer.token() - return g + self.last_token = self.lexer.token() + return self.last_token def find_tok_column(self, token): """ Find the column of the token in its line. @@ -75,7 +84,7 @@ return token.lexpos - last_cr ######################-- PRIVATE --###################### - + ## ## Internal auxiliary methods ## @@ -83,10 +92,10 @@ location = self._make_tok_location(token) self.error_func(msg, location[0], location[1]) self.lexer.skip(1) - + def _make_tok_location(self, token): return (token.lineno, self.find_tok_column(token)) - + ## ## Reserved keywords ## @@ -113,35 +122,35 @@ ## tokens = keywords + ( # Identifiers - 'ID', - - # Type identifiers (identifiers previously defined as + 'ID', + + # Type identifiers (identifiers previously defined as # types with typedef) 'TYPEID', - - # constants + + # constants 'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'FLOAT_CONST', 'HEX_FLOAT_CONST', 'CHAR_CONST', 'WCHAR_CONST', - + # String literals 'STRING_LITERAL', 'WSTRING_LITERAL', - # Operators + # Operators 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', 'LOR', 'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', - + # Assignment - 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', + 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', - 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', + 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', - # Increment/decrement + # Increment/decrement 'PLUSPLUS', 'MINUSMINUS', # Structure dereference (->) @@ -149,18 +158,18 @@ # Conditional operator (?) 'CONDOP', - - # Delimeters + + # Delimeters 'LPAREN', 'RPAREN', # ( ) 'LBRACKET', 'RBRACKET', # [ ] - 'LBRACE', 'RBRACE', # { } + 'LBRACE', 'RBRACE', # { } 'COMMA', 'PERIOD', # . , 'SEMI', 'COLON', # ; : # Ellipsis (...) 'ELLIPSIS', - - # pre-processor + + # pre-processor 'PPHASH', # '#' ) @@ -169,18 +178,18 @@ ## ## - # valid C identifiers (K&R2: A.2.3) - identifier = r'[a-zA-Z_][0-9a-zA-Z_]*' + # valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers) + identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*' hex_prefix = '0[xX]' hex_digits = '[0-9a-fA-F]+' # integer constants (K&R2: A.2.5.1) - integer_suffix_opt = r'(u?ll|U?LL|([uU][lL])|([lL][uU])|[uU]|[lL])?' + integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?' 
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')' octal_constant = '0[0-7]*'+integer_suffix_opt hex_constant = hex_prefix+hex_digits+integer_suffix_opt - + bad_octal_constant = '0[0-7]*[89]' # character constants (K&R2: A.2.5.2) @@ -196,14 +205,14 @@ bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])""" escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))' - cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' + cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' char_const = "'"+cconst_char+"'" wchar_const = 'L'+char_const unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)" bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')""" # string literals (K&R2: A.2.6) - string_char = r"""([^"\\\n]|"""+escape_sequence+')' + string_char = r"""([^"\\\n]|"""+escape_sequence+')' string_literal = '"'+string_char+'*"' wstring_literal = 'L'+string_literal bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"' @@ -221,14 +230,14 @@ ## states = ( # ppline: preprocessor line directives - # + # ('ppline', 'exclusive'), # pppragma: pragma # ('pppragma', 'exclusive'), ) - + def t_PPHASH(self, t): r'[ \t]*\#' if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos): @@ -239,7 +248,7 @@ else: t.type = 'PPHASH' return t - + ## ## Rules for the ppline state ## @@ -261,21 +270,21 @@ def t_ppline_NEWLINE(self, t): r'\n' - + if self.pp_line is None: self._error('line number missing in #line', t) else: self.lexer.lineno = int(self.pp_line) - + if self.pp_filename is not None: self.filename = self.pp_filename - + t.lexer.begin('INITIAL') def t_ppline_PPLINE(self, t): r'line' pass - + t_ppline_ignore = ' \t' def t_ppline_error(self, t): @@ -292,7 +301,7 @@ def t_pppragma_PPPRAGMA(self, t): r'pragma' pass - + t_pppragma_ignore = ' \t<>.-{}();+-*/$%@&^~!?:,0123456789' @TOKEN(string_literal) @@ -364,17 +373,36 @@ t_RPAREN = r'\)' t_LBRACKET = r'\[' t_RBRACKET = r'\]' - t_LBRACE = r'\{' - t_RBRACE = r'\}' t_COMMA = r',' t_PERIOD = r'\.' t_SEMI = r';' t_COLON = r':' t_ELLIPSIS = r'\.\.\.' - t_STRING_LITERAL = string_literal - - # The following floating and integer constants are defined as + # Scope delimiters + # To see why on_lbrace_func is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # TT x = 5; + # Outside the function, TT is a typedef, but inside (starting and ending + # with the braces) it's a parameter. The trouble begins with yacc's + # lookahead token. If we open a new scope in brace_open, then TT has + # already been read and incorrectly interpreted as TYPEID. So, we need + # to open and close scopes from within the lexer. + # Similar for the TT immediately outside the end of the function. 
+ # + @TOKEN(r'\{') + def t_LBRACE(self, t): + self.on_lbrace_func() + return t + @TOKEN(r'\}') + def t_RBRACE(self, t): + self.on_rbrace_func() + return t + + t_STRING_LITERAL = string_literal + + # The following floating and integer constants are defined as # functions to impose a strict order (otherwise, decimal # is placed before the others because its regex is longer, # and this is bad) @@ -404,17 +432,17 @@ def t_INT_CONST_DEC(self, t): return t - # Must come before bad_char_const, to prevent it from + # Must come before bad_char_const, to prevent it from # catching valid char constants as invalid - # + # @TOKEN(char_const) def t_CHAR_CONST(self, t): return t - + @TOKEN(wchar_const) def t_WCHAR_CONST(self, t): return t - + @TOKEN(unmatched_quote) def t_UNMATCHED_QUOTE(self, t): msg = "Unmatched '" @@ -428,12 +456,12 @@ @TOKEN(wstring_literal) def t_WSTRING_LITERAL(self, t): return t - + # unmatched string literals are caught by the preprocessor - + @TOKEN(bad_string_literal) def t_BAD_STRING_LITERAL(self, t): - msg = "String contains invalid escape code" + msg = "String contains invalid escape code" self._error(msg, t) @TOKEN(identifier) @@ -442,40 +470,8 @@ if t.type == 'ID' and self.type_lookup_func(t.value): t.type = "TYPEID" return t - + def t_error(self, t): msg = 'Illegal character %s' % repr(t.value[0]) self._error(msg, t) - -if __name__ == "__main__": - filename = '../zp.c' - text = open(filename).read() - - #~ text = '"'+r"""ka \p ka"""+'"' - text = r""" - 546 - #line 66 "kwas\df.h" - id 4 - # 5 - dsf - """ - - def errfoo(msg, a, b): - sys.write(msg + "\n") - sys.exit() - - def typelookup(namd): - return False - - clex = CLexer(errfoo, typelookup) - clex.build() - clex.input(text) - - while 1: - tok = clex.token() - if not tok: break - - printme([tok.value, tok.type, tok.lineno, clex.filename, tok.lexpos]) - - diff --git a/lib_pypy/cffi/_pycparser/c_parser.py b/lib_pypy/cffi/_pycparser/c_parser.py --- a/lib_pypy/cffi/_pycparser/c_parser.py +++ b/lib_pypy/cffi/_pycparser/c_parser.py @@ -3,7 +3,7 @@ # # CParser class: Parser and AST builder for the C language # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2013, Eli Bendersky # License: BSD #------------------------------------------------------------------------------ import re @@ -16,64 +16,66 @@ from .ast_transforms import fix_switch_cases -class CParser(PLYParser): +class CParser(PLYParser): def __init__( - self, + self, lex_optimize=True, lextab='cffi._pycparser.lextab', yacc_optimize=True, yacctab='cffi._pycparser.yacctab', yacc_debug=False): """ Create a new CParser. - + Some arguments for controlling the debug/optimization - level of the parser are provided. The defaults are - tuned for release/performance mode. + level of the parser are provided. The defaults are + tuned for release/performance mode. The simple rules for using them are: *) When tweaking CParser/CLexer, set these to False *) When releasing a stable parser, set to True - + lex_optimize: Set to False when you're modifying the lexer. Otherwise, changes in the lexer won't be used, if some lextab.py file exists. When releasing with a stable lexer, set to True - to save the re-generation of the lexer table on + to save the re-generation of the lexer table on each run. - + lextab: Points to the lex table that's used for optimized mode. 
Only if you're modifying the lexer and want - some tests to avoid re-generating the table, make + some tests to avoid re-generating the table, make this point to a local lex table file (that's been earlier generated with lex_optimize=True) - + yacc_optimize: Set to False when you're modifying the parser. Otherwise, changes in the parser won't be used, if some parsetab.py file exists. When releasing with a stable parser, set to True - to save the re-generation of the parser table on + to save the re-generation of the parser table on each run. - + yacctab: Points to the yacc table that's used for optimized - mode. Only if you're modifying the parser, make + mode. Only if you're modifying the parser, make this point to a local yacc table file - + yacc_debug: Generate a parser.out file that explains how yacc built the parsing table from the grammar. """ self.clex = CLexer( error_func=self._lex_error_func, + on_lbrace_func=self._lex_on_lbrace_func, + on_rbrace_func=self._lex_on_rbrace_func, type_lookup_func=self._lex_type_lookup_func) - + self.clex.build( optimize=lex_optimize, lextab=lextab) self.tokens = self.clex.tokens - + rules_with_opt = [ 'abstract_declarator', 'assignment_expression', @@ -89,74 +91,118 @@ 'type_qualifier_list', 'struct_declarator_list' ] - + for rule in rules_with_opt: self._create_opt_rule(rule) - + self.cparser = yacc.yacc( - module=self, + module=self, start='translation_unit_or_empty', debug=yacc_debug, optimize=yacc_optimize, tabmodule=yacctab) - - # Stack of scopes for keeping track of typedefs. _scope_stack[-1] is - # the current (topmost) scope. - # - self._scope_stack = [set()] - + + # Stack of scopes for keeping track of symbols. _scope_stack[-1] is + # the current (topmost) scope. Each scope is a dictionary that + # specifies whether a name is a type. If _scope_stack[n][name] is + # True, 'name' is currently a type in the scope. If it's False, + # 'name' is used in the scope but not as a type (for instance, if we + # saw: int name; + # If 'name' is not a key in _scope_stack[n] then 'name' was not defined + # in this scope at all. + self._scope_stack = [dict()] + + # Keeps track of the last token given to yacc (the lookahead token) + self._last_yielded_token = None + def parse(self, text, filename='', debuglevel=0): """ Parses C code and returns an AST. 
- + text: A string containing the C source code - + filename: Name of the file being parsed (for meaningful error messages) - + debuglevel: Debug level to yacc """ self.clex.filename = filename self.clex.reset_lineno() - self._scope_stack = [set()] - return self.cparser.parse(text, lexer=self.clex, debug=debuglevel) - + self._scope_stack = [dict()] + self._last_yielded_token = None + return self.cparser.parse( + input=text, + lexer=self.clex, + debug=debuglevel) + ######################-- PRIVATE --###################### - + def _push_scope(self): - self._scope_stack.append(set()) + self._scope_stack.append(dict()) def _pop_scope(self): assert len(self._scope_stack) > 1 self._scope_stack.pop() - def _add_typedef_type(self, name): - """ Add a new typedef-name to the current scope + def _add_typedef_name(self, name, coord): + """ Add a new typedef name (ie a TYPEID) to the current scope """ - self._scope_stack[-1].add(name) - #~ print(self._scope_stack) + if not self._scope_stack[-1].get(name, True): + self._parse_error( + "Typedef %r previously declared as non-typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = True + + def _add_identifier(self, name, coord): + """ Add a new object, function, or enum member name (ie an ID) to the + current scope + """ + if self._scope_stack[-1].get(name, False): + self._parse_error( + "Non-typedef %r previously declared as typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = False def _is_type_in_scope(self, name): """ Is *name* a typedef-name in the current scope? """ - return any(name in scope for scope in self._scope_stack) + for scope in reversed(self._scope_stack): + # If name is an identifier in this scope it shadows typedefs in + # higher scopes. + in_scope = scope.get(name) + if in_scope is not None: return in_scope + return False def _lex_error_func(self, msg, line, column): self._parse_error(msg, self._coord(line, column)) - + + def _lex_on_lbrace_func(self): + self._push_scope() + + def _lex_on_rbrace_func(self): + self._pop_scope() + def _lex_type_lookup_func(self, name): """ Looks up types that were previously defined with - typedef. + typedef. Passed to the lexer for recognizing identifiers that are types. """ - return self._is_type_in_scope(name) - - # To understand what's going on here, read sections A.8.5 and + is_type = self._is_type_in_scope(name) + return is_type + + def _get_yacc_lookahead_token(self): + """ We need access to yacc's lookahead token in certain cases. + This is the last token yacc requested from the lexer, so we + ask the lexer. + """ + return self.clex.last_token + + # To understand what's going on here, read sections A.8.5 and # A.8.6 of K&R2 very carefully. - # + # # A C type consists of a basic type declaration, with a list # of modifiers. For example: # @@ -166,7 +212,7 @@ # the array are the modifiers. # # Basic declarations are represented by TypeDecl (from module - # c_ast) and the modifiers are FuncDecl, PtrDecl and + # c_ast) and the modifiers are FuncDecl, PtrDecl and # ArrayDecl. 
# # The standard states that whenever a new modifier is parsed, @@ -175,41 +221,41 @@ # # K&R2 A.8.6.2: Array Declarators # - # In a declaration T D where D has the form - # D1 [constant-expression-opt] - # and the type of the identifier in the declaration T D1 is - # "type-modifier T", the type of the + # In a declaration T D where D has the form + # D1 [constant-expression-opt] + # and the type of the identifier in the declaration T D1 is + # "type-modifier T", the type of the # identifier of D is "type-modifier array of T" # # This is what this method does. The declarator it receives - # can be a list of declarators ending with TypeDecl. It - # tacks the modifier to the end of this list, just before + # can be a list of declarators ending with TypeDecl. It + # tacks the modifier to the end of this list, just before # the TypeDecl. # - # Additionally, the modifier may be a list itself. This is + # Additionally, the modifier may be a list itself. This is # useful for pointers, that can come as a chain from the rule - # p_pointer. In this case, the whole modifier list is spliced + # p_pointer. In this case, the whole modifier list is spliced # into the new location. # def _type_modify_decl(self, decl, modifier): """ Tacks a type modifier on a declarator, and returns the modified declarator. - + Note: the declarator and modifier may be modified """ #~ print '****' #~ decl.show(offset=3) #~ modifier.show(offset=3) #~ print '****' - + modifier_head = modifier modifier_tail = modifier - + # The modifier may be a nested list. Reach its tail. # - while modifier_tail.type: + while modifier_tail.type: modifier_tail = modifier_tail.type - + # If the decl is a basic type, just tack the modifier onto # it # @@ -222,29 +268,29 @@ # pointing to the underlying basic type. # decl_tail = decl - + while not isinstance(decl_tail.type, c_ast.TypeDecl): decl_tail = decl_tail.type - + modifier_tail.type = decl_tail.type decl_tail.type = modifier_head return decl # Due to the order in which declarators are constructed, # they have to be fixed in order to look like a normal AST. - # + # # When a declaration arrives from syntax construction, it has # these problems: # * The innermost TypeDecl has no type (because the basic # type is only known at the uppermost declaration level) # * The declaration has no variable name, since that is saved # in the innermost TypeDecl - # * The typename of the declaration is a list of type + # * The typename of the declaration is a list of type # specifiers, and not a node. Here, basic identifier types # should be separated from more complex types like enums # and structs. # - # This method fixes these problem. + # This method fixes these problems. # def _fix_decl_name_type(self, decl, typename): """ Fixes a declaration. Modifies decl. @@ -254,13 +300,13 @@ type = decl while not isinstance(type, c_ast.TypeDecl): type = type.type - + decl.name = type.declname type.quals = decl.quals - - # The typename is a list of types. If any type in this + + # The typename is a list of types. If any type in this # list isn't an IdentifierType, it must be the only - # type in the list (it's illegal to declare "int enum .." + # type in the list (it's illegal to declare "int enum ..") # If all the types are basic, they're collected in the # IdentifierType holder. # @@ -272,14 +318,25 @@ else: type.type = tn return decl - - # At this point, we know that typename is a list of IdentifierType - # nodes. Concatenate all the names into a single list. 
- type.type = c_ast.IdentifierType( - [name for id in typename for name in id.names], - coord=typename[0].coord) + + if not typename: + # Functions default to returning int + # + if not isinstance(decl.type, c_ast.FuncDecl): + self._parse_error( + "Missing type in declaration", decl.coord) + type.type = c_ast.IdentifierType( + ['int'], + coord=decl.coord) + else: + # At this point, we know that typename is a list of IdentifierType + # nodes. Concatenate all the names into a single list. + # + type.type = c_ast.IdentifierType( + [name for id in typename for name in id.names], + coord=typename[0].coord) return decl - + def _add_declaration_specifier(self, declspec, newspec, kind): """ Declaration specifiers are represented by a dictionary with the entries: @@ -287,31 +344,115 @@ * storage: a list of storage type qualifiers * type: a list of type specifiers * function: a list of function specifiers - - This method is given a declaration specifier, and a + + This method is given a declaration specifier, and a new specifier of a given kind. - Returns the declaration specifier, with the new + Returns the declaration specifier, with the new specifier incorporated. """ spec = declspec or dict(qual=[], storage=[], type=[], function=[]) spec[kind].insert(0, newspec) return spec - - def _build_function_definition(self, decl, spec, param_decls, body): + + def _build_declarations(self, spec, decls, typedef_namespace=False): + """ Builds a list of declarations all sharing the given specifiers. + If typedef_namespace is true, each declared name is added + to the "typedef namespace", which also includes objects, + functions, and enum constants. + """ + is_typedef = 'typedef' in spec['storage'] + declarations = [] + + # Bit-fields are allowed to be unnamed. + # + if decls[0].get('bitsize') is not None: + pass + + # When redeclaring typedef names as identifiers in inner scopes, a + # problem can occur where the identifier gets grouped into + # spec['type'], leaving decl as None. This can only occur for the + # first declarator. + # + elif decls[0]['decl'] is None: + if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \ + not self._is_type_in_scope(spec['type'][-1].names[0]): + coord = '?' + for t in spec['type']: + if hasattr(t, 'coord'): + coord = t.coord + break + self._parse_error('Invalid declaration', coord) + + # Make this look as if it came from "direct_declarator:ID" + decls[0]['decl'] = c_ast.TypeDecl( + declname=spec['type'][-1].names[0], + type=None, + quals=None, + coord=spec['type'][-1].coord) + # Remove the "new" type's name from the end of spec['type'] + del spec['type'][-1] + + # A similar problem can occur where the declaration ends up looking + # like an abstract declarator. Give it a name if this is the case. 
+ # + elif not isinstance(decls[0]['decl'], + (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): + decls_0_tail = decls[0]['decl'] + while not isinstance(decls_0_tail, c_ast.TypeDecl): + decls_0_tail = decls_0_tail.type + if decls_0_tail.declname is None: + decls_0_tail.declname = spec['type'][-1].names[0] + del spec['type'][-1] + + for decl in decls: + assert decl['decl'] is not None + if is_typedef: + declaration = c_ast.Typedef( + name=None, + quals=spec['qual'], + storage=spec['storage'], + type=decl['decl'], + coord=decl['decl'].coord) + else: + declaration = c_ast.Decl( + name=None, + quals=spec['qual'], + storage=spec['storage'], + funcspec=spec['function'], + type=decl['decl'], + init=decl.get('init'), + bitsize=decl.get('bitsize'), + coord=decl['decl'].coord) + + if isinstance(declaration.type, + (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): + fixed_decl = declaration + else: + fixed_decl = self._fix_decl_name_type(declaration, spec['type']) + + # Add the type name defined by typedef to a + # symbol table (for usage in the lexer) + # + if typedef_namespace: + if is_typedef: + self._add_typedef_name(fixed_decl.name, fixed_decl.coord) + else: + self._add_identifier(fixed_decl.name, fixed_decl.coord) + + declarations.append(fixed_decl) + + return declarations + + def _build_function_definition(self, spec, decl, param_decls, body): """ Builds a function definition. """ - declaration = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=None, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] - declaration = self._fix_decl_name_type(declaration, typename) + assert 'typedef' not in spec['storage'] + + declaration = self._build_declarations( + spec=spec, + decls=[dict(decl=decl, init=None)], + typedef_namespace=True)[0] + return c_ast.FuncDef( decl=declaration, param_decls=param_decls, @@ -361,29 +502,29 @@ p[0] = c_ast.FileAST(p[1]) def p_translation_unit_1(self, p): - """ translation_unit : external_declaration + """ translation_unit : external_declaration """ # Note: external_declaration is already a list # p[0] = p[1] - + def p_translation_unit_2(self, p): """ translation_unit : translation_unit external_declaration """ if p[2] is not None: p[1].extend(p[2]) p[0] = p[1] - + # Declarations always come as lists (because they can be - # several in one line), so we wrap the function definition - # into a list as well, to make the return value of + # several in one line), so we wrap the function definition + # into a list as well, to make the return value of # external_declaration homogenous. 
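As the comment above notes, declarations always come up as lists, and _build_declarations produces one node per declarator while sharing the specifier dict between them. A small illustrative check with invented input, not taken from the changeset:

    from pycparser import c_parser

    ast = c_parser.CParser().parse("static const int x = 1, *p = 0;")
    print(len(ast.ext))                # 2 -- one Decl per declarator
    print([d.name for d in ast.ext])   # ['x', 'p']
    print(ast.ext[0].storage)          # ['static'], shared with 'p'
    print(ast.ext[0].quals)            # ['const']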
# def p_external_declaration_1(self, p): """ external_declaration : function_definition """ p[0] = [p[1]] - + def p_external_declaration_2(self, p): """ external_declaration : declaration """ @@ -393,16 +534,16 @@ """ external_declaration : pp_directive """ p[0] = p[1] - + def p_external_declaration_4(self, p): """ external_declaration : SEMI """ p[0] = None def p_pp_directive(self, p): - """ pp_directive : PPHASH + """ pp_directive : PPHASH """ - self._parse_error('Directives not supported yet', + self._parse_error('Directives not supported yet', self._coord(p.lineno(1))) # In function definitions, the declarator can be followed by @@ -411,32 +552,37 @@ def p_function_definition_1(self, p): """ function_definition : declarator declaration_list_opt compound_statement """ - # no declaration specifiers - spec = dict(qual=[], storage=[], type=[]) + # no declaration specifiers - 'int' becomes the default type + spec = dict( + qual=[], + storage=[], + type=[c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))], + function=[]) p[0] = self._build_function_definition( + spec=spec, decl=p[1], - spec=spec, param_decls=p[2], body=p[3]) - + def p_function_definition_2(self, p): """ function_definition : declaration_specifiers declarator declaration_list_opt compound_statement """ spec = p[1] p[0] = self._build_function_definition( + spec=spec, decl=p[2], - spec=spec, param_decls=p[3], body=p[4]) - + def p_statement(self, p): """ statement : labeled_statement | expression_statement | compound_statement | selection_statement - | iteration_statement + | iteration_statement | jump_statement """ p[0] = p[1] @@ -454,66 +600,43 @@ """ decl_body : declaration_specifiers init_declarator_list_opt """ spec = p[1] - is_typedef = 'typedef' in spec['storage'] - decls = [] - + # p[2] (init_declarator_list_opt) is either a list or None # if p[2] is None: - # Then it's a declaration of a struct / enum tag, - # without an actual declarator. + # By the standard, you must have at least one declarator unless + # declaring a structure tag, a union tag, or the members of an + # enumeration. # ty = spec['type'] - if len(ty) > 1: - coord = '?' - for t in ty: - if hasattr(t, 'coord'): - coord = t.coord - break - - self._parse_error('Multiple type specifiers with a type tag', - coord) - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=ty[0], - init=None, - bitsize=None, - coord=ty[0].coord) - decls = [decl] + s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum) + if len(ty) == 1 and isinstance(ty[0], s_u_or_e): + decls = [c_ast.Decl( + name=None, + quals=spec['qual'], + storage=spec['storage'], + funcspec=spec['function'], + type=ty[0], + init=None, + bitsize=None, + coord=ty[0].coord)] + + # However, this case can also occur on redeclared identifiers in + # an inner scope. The trouble is that the redeclared type's name + # gets grouped into declaration_specifiers; _build_declarations + # compensates for this. 
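The compensation described above is what lets a typedef name be redeclared as an ordinary object in a nested scope: the lexer still reports the inner name as TYPEID, so it lands in declaration_specifiers, and _build_declarations turns it back into a declarator. A hedged example with invented input:

    from pycparser import c_parser

    ast = c_parser.CParser().parse(
        "typedef int T;\n"
        "void f(void) { unsigned T; }\n")   # inner T: an unsigned object
    ast.show()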
+ # + else: + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)], + typedef_namespace=True) + else: - for decl, init in p[2] or []: - if is_typedef: - decl = c_ast.Typedef( - name=None, - quals=spec['qual'], - storage=spec['storage'], - type=decl, - coord=decl.coord) - else: - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=init, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] - fixed_decl = self._fix_decl_name_type(decl, typename) - - # Add the type name defined by typedef to a - # symbol table (for usage in the lexer) - # - if is_typedef: - self._add_typedef_type(fixed_decl.name) - - decls.append(fixed_decl) + decls = self._build_declarations( + spec=spec, + decls=p[2], + typedef_namespace=True) p[0] = decls @@ -522,7 +645,7 @@ # for defining typedefs. # # If a typedef line was directly followed by a line using the - # type defined with the typedef, the type would not be + # type defined with the typedef, the type would not be # recognized. This is because to reduce the declaration rule, # the parser's lookahead asked for the token after SEMI, which # was the type from the next line, and the lexer had no chance @@ -532,42 +655,41 @@ # the parser reduces decl_body, which actually adds the new # type into the table to be seen by the lexer before the next # line is reached. - # def p_declaration(self, p): - """ declaration : decl_body SEMI + """ declaration : decl_body SEMI """ p[0] = p[1] # Since each declaration is a list of declarations, this # rule will combine all the declarations and return a single # list - # + # def p_declaration_list(self, p): """ declaration_list : declaration | declaration_list declaration """ p[0] = p[1] if len(p) == 2 else p[1] + p[2] - + def p_declaration_specifiers_1(self, p): - """ declaration_specifiers : type_qualifier declaration_specifiers_opt + """ declaration_specifiers : type_qualifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') - + def p_declaration_specifiers_2(self, p): """ declaration_specifiers : type_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'type') - + def p_declaration_specifiers_3(self, p): """ declaration_specifiers : storage_class_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'storage') - + def p_declaration_specifiers_4(self, p): """ declaration_specifiers : function_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'function') - + def p_storage_class_specifier(self, p): """ storage_class_specifier : AUTO | REGISTER @@ -576,12 +698,12 @@ | TYPEDEF """ p[0] = p[1] - + def p_function_specifier(self, p): """ function_specifier : INLINE """ p[0] = p[1] - + def p_type_specifier_1(self, p): """ type_specifier : VOID | _BOOL @@ -603,34 +725,52 @@ | struct_or_union_specifier """ p[0] = p[1] - + def p_type_qualifier(self, p): """ type_qualifier : CONST | RESTRICT | VOLATILE """ p[0] = p[1] - - def p_init_declarator_list(self, p): + + def p_init_declarator_list_1(self, p): """ init_declarator_list : init_declarator | init_declarator_list COMMA init_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] - # Returns a (declarator, initializer) pair - # If there's no initializer, returns (declarator, None) + # If the code is declaring a variable that was declared a typedef in an + # outer scope, yacc will think the name is part of 
declaration_specifiers, + # not init_declarator, and will then get confused by EQUALS. Pass None + # up in place of declarator, and handle this at a higher level. + # + def p_init_declarator_list_2(self, p): + """ init_declarator_list : EQUALS initializer + """ + p[0] = [dict(decl=None, init=p[2])] + + # Similarly, if the code contains duplicate typedefs of, for example, + # array types, the array portion will appear as an abstract declarator. + # + def p_init_declarator_list_3(self, p): + """ init_declarator_list : abstract_declarator + """ + p[0] = [dict(decl=p[1], init=None)] + + # Returns a {decl= : init=} dictionary + # If there's no initializer, uses None # def p_init_declarator(self, p): """ init_declarator : declarator | declarator EQUALS initializer """ - p[0] = (p[1], p[3] if len(p) > 2 else None) - + p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None)) + def p_specifier_qualifier_list_1(self, p): """ specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') - + def p_specifier_qualifier_list_2(self, p): """ specifier_qualifier_list : type_specifier specifier_qualifier_list_opt """ @@ -645,8 +785,8 @@ """ klass = self._select_struct_union_class(p[1]) p[0] = klass( - name=p[2], - decls=None, + name=p[2], + decls=None, coord=self._coord(p.lineno(2))) def p_struct_or_union_specifier_2(self, p): @@ -669,7 +809,7 @@ coord=self._coord(p.lineno(2))) def p_struct_or_union(self, p): - """ struct_or_union : STRUCT + """ struct_or_union : STRUCT | UNION """ p[0] = p[1] @@ -686,59 +826,60 @@ """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI """ spec = p[1] - decls = [] - + assert 'typedef' not in spec['storage'] + if p[2] is not None: - for struct_decl in p[2]: - if struct_decl['decl'] is not None: - decl_coord = struct_decl['decl'].coord - else: - decl_coord = struct_decl['bitsize'].coord - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - funcspec=spec['function'], - storage=spec['storage'], - type=struct_decl['decl'], - init=None, - bitsize=struct_decl['bitsize'], - coord=decl_coord) - - typename = spec['type'] - decls.append(self._fix_decl_name_type(decl, typename)) - else: + decls = self._build_declarations( + spec=spec, + decls=p[2]) + + elif len(spec['type']) == 1: # Anonymous struct/union, gcc extension, C1x feature. - # Although the standard only allows structs/unions here, I see no + # Although the standard only allows structs/unions here, I see no # reason to disallow other types since some compilers have typedefs # here, and pycparser isn't about rejecting all invalid code. - # + # node = spec['type'][0] - if isinstance(node, c_ast.Node): decl_type = node else: decl_type = c_ast.IdentifierType(node) - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - funcspec=spec['function'], - storage=spec['storage'], - type=decl_type, - init=None, - bitsize=None, - coord=self._coord(p.lineno(3))) - decls.append(decl) - + + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=decl_type)]) + + else: + # Structure/union members can have the same names as typedefs. + # The trouble is that the member's name gets grouped into + # specifier_qualifier_list; _build_declarations compensates. + # + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)]) + p[0] = decls - + + def p_struct_declaration_2(self, p): + """ struct_declaration : specifier_qualifier_list abstract_declarator SEMI + """ + # "Abstract declarator?!", you ask? 
Structure members can have the + # same names as typedefs. The trouble is that the member's name gets + # grouped into specifier_qualifier_list, leaving any remainder to + # appear as an abstract declarator, as in: + # typedef int Foo; + # struct { Foo Foo[3]; }; + # + p[0] = self._build_declarations( + spec=p[1], + decls=[dict(decl=p[2], init=None)]) + def p_struct_declarator_list(self, p): """ struct_declarator_list : struct_declarator | struct_declarator_list COMMA struct_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] - + # struct_declarator passes up a dict with the keys: decl (for # the underlying declarator) and bitsize (for the bitsize) # @@ -746,7 +887,7 @@ """ struct_declarator : declarator """ p[0] = {'decl': p[1], 'bitsize': None} - + def p_struct_declarator_2(self, p): """ struct_declarator : declarator COLON constant_expression | COLON constant_expression @@ -755,24 +896,24 @@ p[0] = {'decl': p[1], 'bitsize': p[3]} else: p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]} - + def p_enum_specifier_1(self, p): """ enum_specifier : ENUM ID | ENUM TYPEID """ p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1))) - + def p_enum_specifier_2(self, p): """ enum_specifier : ENUM brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1))) - + def p_enum_specifier_3(self, p): """ enum_specifier : ENUM ID brace_open enumerator_list brace_close | ENUM TYPEID brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1))) - + def p_enumerator_list(self, p): """ enumerator_list : enumerator | enumerator_list COMMA @@ -791,95 +932,130 @@ | ID EQUALS constant_expression """ if len(p) == 2: - p[0] = c_ast.Enumerator( - p[1], None, + enumerator = c_ast.Enumerator( + p[1], None, self._coord(p.lineno(1))) else: - p[0] = c_ast.Enumerator( - p[1], p[3], + enumerator = c_ast.Enumerator( + p[1], p[3], self._coord(p.lineno(1))) - + self._add_identifier(enumerator.name, enumerator.coord) + + p[0] = enumerator + def p_declarator_1(self, p): - """ declarator : direct_declarator + """ declarator : direct_declarator """ p[0] = p[1] - + def p_declarator_2(self, p): - """ declarator : pointer direct_declarator + """ declarator : pointer direct_declarator """ p[0] = self._type_modify_decl(p[2], p[1]) - + + # Since it's impossible for a type to be specified after a pointer, assume + # it's intended to be the name for this declaration. _add_identifier will + # raise an error if this TYPEID can't be redeclared. 
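A related declarator rule added just below handles a pointer followed by a TYPEID, treating the typedef name as the declared identifier. A hedged illustration (invented input, not a test from this changeset):

    from pycparser import c_parser

    # Inside f, 'T' names a new pointer object even though the file-scope
    # typedef makes the lexer classify it as TYPEID.
    ast = c_parser.CParser().parse(
        "typedef char T;\n"
        "void f(void) { int *T; }\n")
    ast.show()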
+ # + def p_declarator_3(self, p): + """ declarator : pointer TYPEID + """ + decl = c_ast.TypeDecl( + declname=p[2], + type=None, + quals=None, + coord=self._coord(p.lineno(2))) + + p[0] = self._type_modify_decl(decl, p[1]) + def p_direct_declarator_1(self, p): - """ direct_declarator : ID + """ direct_declarator : ID """ p[0] = c_ast.TypeDecl( - declname=p[1], - type=None, + declname=p[1], + type=None, quals=None, coord=self._coord(p.lineno(1))) - + def p_direct_declarator_2(self, p): - """ direct_declarator : LPAREN declarator RPAREN + """ direct_declarator : LPAREN declarator RPAREN """ p[0] = p[2] - + def p_direct_declarator_3(self, p): - """ direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET + """ direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=p[3], coord=p[1].coord) - + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) # Special for VLAs # def p_direct_declarator_4(self, p): - """ direct_declarator : direct_declarator LBRACKET TIMES RBRACKET + """ direct_declarator : direct_declarator LBRACKET TIMES RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=c_ast.ID(p[3], self._coord(p.lineno(3))), coord=p[1].coord) - + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) def p_direct_declarator_5(self, p): - """ direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN + """ direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN | direct_declarator LPAREN identifier_list_opt RPAREN """ func = c_ast.FuncDecl( args=p[3], type=None, coord=p[1].coord) - + + # To see why _get_yacc_lookahead_token is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # Outside the function, TT is a typedef, but inside (starting and + # ending with the braces) it's a parameter. The trouble begins with + # yacc's lookahead token. We don't know if we're declaring or + # defining a function until we see LBRACE, but if we wait for yacc to + # trigger a rule on that token, then TT will have already been read + # and incorrectly interpreted as TYPEID. We need to add the + # parameters to the scope the moment the lexer sees LBRACE. 
+ # + if self._get_yacc_lookahead_token().type == "LBRACE": + if func.args is not None: + for param in func.args.params: + if isinstance(param, c_ast.EllipsisParam): break + self._add_identifier(param.name, param.coord) + p[0] = self._type_modify_decl(decl=p[1], modifier=func) - + def p_pointer(self, p): """ pointer : TIMES type_qualifier_list_opt | TIMES type_qualifier_list_opt pointer """ coord = self._coord(p.lineno(1)) - + p[0] = c_ast.PtrDecl( quals=p[2] or [], type=p[3] if len(p) > 3 else None, coord=coord) - + def p_type_qualifier_list(self, p): """ type_qualifier_list : type_qualifier | type_qualifier_list type_qualifier """ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] - + def p_parameter_type_list(self, p): """ parameter_type_list : parameter_list | parameter_list COMMA ELLIPSIS """ - if len(p) > 2: + if len(p) > 2: p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3)))) - + p[0] = p[1] def p_parameter_list(self, p): @@ -896,33 +1072,43 @@ """ parameter_declaration : declaration_specifiers declarator """ spec = p[1] - decl = p[2] - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=None, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] or ['int'] - p[0] = self._fix_decl_name_type(decl, typename) - + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))] + p[0] = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2])])[0] + def p_parameter_declaration_2(self, p): """ parameter_declaration : declaration_specifiers abstract_declarator_opt """ spec = p[1] - decl = c_ast.Typename( - quals=spec['qual'], - type=p[2] or c_ast.TypeDecl(None, None, None), - coord=self._coord(p.lineno(2))) - - typename = spec['type'] or ['int'] - p[0] = self._fix_decl_name_type(decl, typename) - + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))] + + # Parameters can have the same names as typedefs. The trouble is that + # the parameter's name gets grouped into declaration_specifiers, making + # it look like an old-style declaration; compensate. 
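Taken together, the lookahead check above and the lexer's brace callbacks are what make the example quoted in the comments work end to end: a parameter that reuses a typedef name becomes an ordinary identifier inside the function body. A quick sketch through the public API (assumes a standalone pycparser install; cffi bundles the same parser as cffi._pycparser):

    from pycparser import c_parser

    ast = c_parser.CParser().parse(
        "typedef char TT;\n"
        "void foo(int TT) { TT = 10; }\n")
    # Outside foo, TT is still the typedef; inside, 'TT = 10;' assigns to
    # the int parameter instead of tripping over a TYPEID token.
    ast.show()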
+ # + if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \ + self._is_type_in_scope(spec['type'][-1].names[0]): + decl = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2], init=None)])[0] + + # This truly is an old-style parameter declaration + # + else: + decl = c_ast.Typename( + quals=spec['qual'], + type=p[2] or c_ast.TypeDecl(None, None, None), + coord=self._coord(p.lineno(2))) + typename = spec['type'] + decl = self._fix_decl_name_type(decl, typename) + + p[0] = decl + def p_identifier_list(self, p): """ identifier_list : identifier | identifier_list COMMA identifier @@ -937,7 +1123,7 @@ """ initializer : assignment_expression """ p[0] = p[1] From noreply at buildbot.pypy.org Fri Mar 7 01:37:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 01:37:09 +0100 (CET) Subject: [pypy-commit] pypy default: transplant simplifications to test_select from py3k Message-ID: <20140307003709.0479D1C1007@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69768:0b343f4144e4 Date: 2014-03-06 19:34 -0500 http://bitbucket.org/pypy/pypy/changeset/0b343f4144e4/ Log: transplant simplifications to test_select from py3k diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -43,7 +43,7 @@ try: iwtd, owtd, ewtd = select.select([readend], [], [], 0) assert iwtd == owtd == ewtd == [] - writeend.send('X') + writeend.send(b'X') iwtd, owtd, ewtd = select.select([readend], [], []) assert iwtd == [readend] assert owtd == ewtd == [] @@ -84,7 +84,7 @@ if owtd == []: break assert owtd == [writeend] - total_out += writeend.send('x' * 512) + total_out += writeend.send(b'x' * 512) total_in = 0 while True: iwtd, owtd, ewtd = select.select([readend], [], [], 0) @@ -94,7 +94,7 @@ assert iwtd == [readend] data = readend.recv(4096) assert len(data) > 0 - assert data == 'x' * len(data) + assert data == b'x' * len(data) total_in += len(data) assert total_in == total_out finally: @@ -110,7 +110,7 @@ readend, writeend = self.getpair() try: try: - total_out = writeend.send('x' * 512) + total_out = writeend.send(b'x' * 512) finally: # win32 sends the 'closed' event immediately, even when # more data is available @@ -126,7 +126,7 @@ data = readend.recv(4096) if len(data) == 0: break - assert data == 'x' * len(data) + assert data == b'x' * len(data) total_in += len(data) # win32: check that closing the socket exits the loop if sys.platform == 'win32' and total_in == total_out: @@ -171,12 +171,12 @@ for i in range(50): n = (i*3) % 10 - writeends[n].send('X') + writeends[n].send(b'X') iwtd, owtd, ewtd = select.select(readends, [], []) assert iwtd == [readends[n]] assert owtd == ewtd == [] data = readends[n].recv(1) - assert data == 'X' + assert data == b'X' finally: for fd in readends + writeends: @@ -251,34 +251,30 @@ "usemodules": ["select", "_socket", "rctime", "thread"], } - def setup_class(cls): - space = cls.space - w_import = space.getattr(space.builtin, space.wrap("__import__")) - w_socketmod = space.call_function(w_import, space.wrap("socket")) - cls.w_sock = cls.space.call_method(w_socketmod, "socket") - cls.w_sock_err = space.getattr(w_socketmod, space.wrap("error")) - - try_ports = [1023] + range(20000, 30000, 437) + def w_make_server(self): + import socket + if hasattr(self, 'sock'): + return self.sock + self.sock = socket.socket() + try_ports = [1023] + list(range(20000, 30000, 437)) for port in try_ports: - print 'binding 
to port %d:' % (port,), - cls.w_sockaddress = space.wrap(('127.0.0.1', port)) + print('binding to port %d:' % (port,)) + self.sockaddress = ('127.0.0.1', port) try: - space.call_method(cls.w_sock, "bind", cls.w_sockaddress) + self.sock.bind(self.sockaddress) break - except OperationError, e: # should get a "Permission denied" - if not e.match(space, space.getattr(w_socketmod, space.wrap("error"))): - raise - print e.errorstr(space) - except cls.w_sock_err, e: # should get a "Permission denied" - print e + except socket.error as e: # should get a "Permission denied" + print(e) else: - raise e + raise(e) def w_getpair(self): """Helper method which returns a pair of connected sockets.""" import socket import thread + self.make_server() + self.sock.listen(1) s2 = socket.socket() thread.start_new_thread(s2.connect, (self.sockaddress,)) From noreply at buildbot.pypy.org Fri Mar 7 01:38:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 01:38:57 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: merge default Message-ID: <20140307003857.135A01C1007@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69769:f3ee559b99c5 Date: 2014-03-06 19:38 -0500 http://bitbucket.org/pypy/pypy/changeset/f3ee559b99c5/ Log: merge default diff too long, truncating to 2000 out of 4725 lines diff --git a/lib_pypy/cffi/_pycparser/README b/lib_pypy/cffi/_pycparser/README new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/_pycparser/README @@ -0,0 +1,12 @@ +This is a copy of pycparser. See __init__.py for the version. + +Note that the following two lines have been modified in c_parser.py: + + +class CParser(PLYParser): + def __init__( + ... + lextab='cffi._pycparser.lextab', + ^^^^^^^^^^^^^^^ + yacctab='cffi._pycparser.yacctab', + ^^^^^^^^^^^^^^^ diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py --- a/lib_pypy/cffi/_pycparser/__init__.py +++ b/lib_pypy/cffi/_pycparser/__init__.py @@ -1,14 +1,14 @@ #----------------------------------------------------------------- # pycparser: __init__.py # -# This package file exports some convenience functions for +# This package file exports some convenience functions for # interacting with pycparser # # Copyright (C) 2008-2012, Eli Bendersky # License: BSD #----------------------------------------------------------------- __all__ = ['c_lexer', 'c_parser', 'c_ast'] -__version__ = '2.09.1' +__version__ = '2.10' from subprocess import Popen, PIPE from .c_parser import CParser @@ -26,12 +26,12 @@ arguments. When successful, returns the preprocessed file's contents. - Errors from cpp will be printed out. + Errors from cpp will be printed out. """ path_list = [cpp_path] if isinstance(cpp_args, list): path_list += cpp_args - elif cpp_args != '': + elif cpp_args != '': path_list += [cpp_args] path_list += [filename] @@ -39,8 +39,8 @@ # Note the use of universal_newlines to treat all newlines # as \n for Python's purpose # - pipe = Popen( path_list, - stdout=PIPE, + pipe = Popen( path_list, + stdout=PIPE, universal_newlines=True) text = pipe.communicate()[0] except OSError as e: @@ -77,10 +77,10 @@ parser: Optional parser object to be used instead of the default CParser - When successful, an AST is returned. ParseError can be + When successful, an AST is returned. ParseError can be thrown if the file doesn't parse successfully. - Errors from cpp will be printed out. + Errors from cpp will be printed out. 
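For context on how pycparser's top-level parse_file entry point shown here is meant to be used, a hedged usage sketch; the filename and cpp arguments below are placeholders, not files referenced anywhere in this changeset:

    from pycparser import parse_file

    ast = parse_file('example.c', use_cpp=True,
                     cpp_path='cpp',
                     cpp_args=r'-Iutils/fake_libc_include')
    ast.show()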
""" if use_cpp: text = preprocess_file(filename, cpp_path, cpp_args) diff --git a/lib_pypy/cffi/_pycparser/_build_tables.py b/lib_pypy/cffi/_pycparser/_build_tables.py --- a/lib_pypy/cffi/_pycparser/_build_tables.py +++ b/lib_pypy/cffi/_pycparser/_build_tables.py @@ -1,7 +1,7 @@ #----------------------------------------------------------------- # pycparser: _build_tables.py # -# A dummy for generating the lexing/parsing tables and and +# A dummy for generating the lexing/parsing tables and and # compiling them into .pyc for faster execution in optimized mode. # Also generates AST code from the configuration file. # Should be called from the pycparser directory. @@ -17,14 +17,14 @@ ast_gen.generate(open('c_ast.py', 'w')) import sys -sys.path.extend(['.', '..']) +sys.path[0:0] = ['.', '..'] from pycparser import c_parser # Generates the tables # c_parser.CParser( - lex_optimize=True, - yacc_debug=False, + lex_optimize=True, + yacc_debug=False, yacc_optimize=True) # Load to compile into .pyc diff --git a/lib_pypy/cffi/_pycparser/_c_ast.cfg b/lib_pypy/cffi/_pycparser/_c_ast.cfg --- a/lib_pypy/cffi/_pycparser/_c_ast.cfg +++ b/lib_pypy/cffi/_pycparser/_c_ast.cfg @@ -29,7 +29,7 @@ Cast: [to_type*, expr*] -# Compound statement in C99 is a list of block items (declarations or +# Compound statement in C99 is a list of block items (declarations or # statements). # Compound: [block_items**] @@ -37,7 +37,7 @@ # Compound literal (anonymous aggregate) for C99. # (type-name) {initializer_list} # type: the typename -# init: InitExprList for the initializer list +# init: InitList for the initializer list # CompoundLiteral: [type*, init*] diff --git a/lib_pypy/cffi/_pycparser/c_generator.py b/lib_pypy/cffi/_pycparser/c_generator.py --- a/lib_pypy/cffi/_pycparser/c_generator.py +++ b/lib_pypy/cffi/_pycparser/c_generator.py @@ -11,34 +11,34 @@ class CGenerator(object): """ Uses the same visitor pattern as c_ast.NodeVisitor, but modified to - return a value from each visit method, using string accumulation in + return a value from each visit method, using string accumulation in generic_visit. """ def __init__(self): self.output = '' - + # Statements start with indentation of self.indent_level spaces, using # the _make_indent method # self.indent_level = 0 - + def _make_indent(self): return ' ' * self.indent_level - + def visit(self, node): method = 'visit_' + node.__class__.__name__ return getattr(self, method, self.generic_visit)(node) - + def generic_visit(self, node): #~ print('generic:', type(node)) if node is None: return '' else: return ''.join(self.visit(c) for c in node.children()) - + def visit_Constant(self, n): return n.value - + def visit_ID(self, n): return n.name @@ -61,22 +61,22 @@ elif n.op == 'p--': return '%s--' % operand elif n.op == 'sizeof': - # Always parenthesize the argument of sizeof since it can be + # Always parenthesize the argument of sizeof since it can be # a name. 
return 'sizeof(%s)' % self.visit(n.expr) else: return '%s%s' % (n.op, operand) def visit_BinaryOp(self, n): - lval_str = self._parenthesize_if(n.left, + lval_str = self._parenthesize_if(n.left, lambda d: not self._is_simple_node(d)) - rval_str = self._parenthesize_if(n.right, + rval_str = self._parenthesize_if(n.right, lambda d: not self._is_simple_node(d)) return '%s %s %s' % (lval_str, n.op, rval_str) def visit_Assignment(self, n): rval_str = self._parenthesize_if( - n.rvalue, + n.rvalue, lambda n: isinstance(n, c_ast.Assignment)) return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str) @@ -101,7 +101,7 @@ def visit_DeclList(self, n): s = self.visit(n.decls[0]) if len(n.decls) > 1: - s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) + s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) for decl in n.decls[1:]) return s @@ -112,7 +112,7 @@ return s def visit_Cast(self, n): - s = '(' + self._generate_type(n.to_type) + ')' + s = '(' + self._generate_type(n.to_type) + ')' return s + ' ' + self._parenthesize_unless_simple(n.expr) def visit_ExprList(self, n): @@ -127,8 +127,10 @@ def visit_InitList(self, n): visited_subexprs = [] for expr in n.exprs: - if isinstance(expr, c_ast.InitList): + if isinstance(expr, c_ast.ExprList): visited_subexprs.append('(' + self.visit(expr) + ')') + elif isinstance(expr, c_ast.InitList): + visited_subexprs.append('{' + self.visit(expr) + '}') else: visited_subexprs.append(self.visit(expr)) return ', '.join(visited_subexprs) @@ -140,9 +142,9 @@ s += ' {' for i, enumerator in enumerate(n.values.enumerators): s += enumerator.name - if enumerator.value: + if enumerator.value: s += ' = ' + self.visit(enumerator.value) - if i != len(n.values.enumerators) - 1: + if i != len(n.values.enumerators) - 1: s += ', ' s += '}' return s @@ -203,7 +205,7 @@ if n.cond: s += self.visit(n.cond) s += ')\n' s += self._generate_stmt(n.iftrue, add_indent=True) - if n.iffalse: + if n.iffalse: s += self._make_indent() + 'else\n' s += self._generate_stmt(n.iffalse, add_indent=True) return s @@ -265,7 +267,7 @@ def visit_Typename(self, n): return self._generate_type(n.type) - + def visit_Union(self, n): return self._generate_struct_union(n, 'union') @@ -280,13 +282,13 @@ return s def _generate_struct_union(self, n, name): - """ Generates code for structs and unions. name should be either + """ Generates code for structs and unions. name should be either 'struct' or union. """ s = name + ' ' + (n.name or '') if n.decls: s += '\n' - s += self._make_indent() + s += self._make_indent() self.indent_level += 2 s += '{\n' for decl in n.decls: @@ -297,25 +299,26 @@ def _generate_stmt(self, n, add_indent=False): """ Generation from a statement node. This method exists as a wrapper - for individual visit_* methods to handle different treatment of + for individual visit_* methods to handle different treatment of some statements in this context. 
""" typ = type(n) if add_indent: self.indent_level += 2 indent = self._make_indent() if add_indent: self.indent_level -= 2 - - if typ in ( + + if typ in ( c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp, c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef, - c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef): + c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef, + c_ast.ExprList): # These can also appear in an expression context so no semicolon # is added to them automatically # return indent + self.visit(n) + ';\n' elif typ in (c_ast.Compound,): - # No extra indentation required before the opening brace of a - # compound - because it consists of multiple lines it has to + # No extra indentation required before the opening brace of a + # compound - because it consists of multiple lines it has to # compute its own indentation. # return self.visit(n) @@ -330,21 +333,21 @@ if n.storage: s += ' '.join(n.storage) + ' ' s += self._generate_type(n.type) return s - + def _generate_type(self, n, modifiers=[]): - """ Recursive generation from a type node. n is the type node. - modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers + """ Recursive generation from a type node. n is the type node. + modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers encountered on the way down to a TypeDecl, to allow proper generation from it. """ typ = type(n) #~ print(n, modifiers) - + if typ == c_ast.TypeDecl: s = '' if n.quals: s += ' '.join(n.quals) + ' ' s += self.visit(n.type) - + nstr = n.declname if n.declname else '' # Resolve modifiers. # Wrap in parens to distinguish pointer to array and pointer to @@ -396,7 +399,7 @@ """ Returns True for nodes that are "simple" - i.e. nodes that always have higher precedence than operators. """ - return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef, + return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef, c_ast.StructRef, c_ast.FuncCall)) diff --git a/lib_pypy/cffi/_pycparser/c_lexer.py b/lib_pypy/cffi/_pycparser/c_lexer.py --- a/lib_pypy/cffi/_pycparser/c_lexer.py +++ b/lib_pypy/cffi/_pycparser/c_lexer.py @@ -1,11 +1,11 @@ +#------------------------------------------------------------------------------ # pycparser: c_lexer.py # # CLexer class: lexer for the C language # -# Copyright (C) 2008-2011, Eli Bendersky +# Copyright (C) 2008-2013, Eli Bendersky # License: BSD -#----------------------------------------------------------------- - +#------------------------------------------------------------------------------ import re import sys @@ -15,41 +15,50 @@ class CLexer(object): """ A lexer for the C language. After building it, set the - input text with input(), and call token() to get new + input text with input(), and call token() to get new tokens. - + The public attribute filename can be set to an initial - filaneme, but the lexer will update it upon #line + filaneme, but the lexer will update it upon #line directives. """ - def __init__(self, error_func, type_lookup_func): + def __init__(self, error_func, on_lbrace_func, on_rbrace_func, + type_lookup_func): """ Create a new Lexer. - + error_func: An error function. Will be called with an error - message, line and column as arguments, in case of + message, line and column as arguments, in case of an error during lexing. - + + on_lbrace_func, on_rbrace_func: + Called when an LBRACE or RBRACE is encountered + (likely to push/pop type_lookup_func's scope) + type_lookup_func: A type lookup function. 
Given a string, it must return True IFF this string is a name of a type that was defined with a typedef earlier. """ self.error_func = error_func + self.on_lbrace_func = on_lbrace_func + self.on_rbrace_func = on_rbrace_func self.type_lookup_func = type_lookup_func self.filename = '' - + + # Keeps track of the last token returned from self.token() + self.last_token = None + # Allow either "# line" or "# " to support GCC's # cpp output # self.line_pattern = re.compile('([ \t]*line\W)|([ \t]*\d+)') - self.pragma_pattern = re.compile('[ \t]*pragma\W') def build(self, **kwargs): """ Builds the lexer from the specification. Must be - called after the lexer object is created. - + called after the lexer object is created. + This method exists separately, because the PLY manual warns against calling lex.lex inside __init__ @@ -63,10 +72,10 @@ def input(self, text): self.lexer.input(text) - + def token(self): - g = self.lexer.token() - return g + self.last_token = self.lexer.token() + return self.last_token def find_tok_column(self, token): """ Find the column of the token in its line. @@ -75,7 +84,7 @@ return token.lexpos - last_cr ######################-- PRIVATE --###################### - + ## ## Internal auxiliary methods ## @@ -83,10 +92,10 @@ location = self._make_tok_location(token) self.error_func(msg, location[0], location[1]) self.lexer.skip(1) - + def _make_tok_location(self, token): return (token.lineno, self.find_tok_column(token)) - + ## ## Reserved keywords ## @@ -113,35 +122,35 @@ ## tokens = keywords + ( # Identifiers - 'ID', - - # Type identifiers (identifiers previously defined as + 'ID', + + # Type identifiers (identifiers previously defined as # types with typedef) 'TYPEID', - - # constants + + # constants 'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'FLOAT_CONST', 'HEX_FLOAT_CONST', 'CHAR_CONST', 'WCHAR_CONST', - + # String literals 'STRING_LITERAL', 'WSTRING_LITERAL', - # Operators + # Operators 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', 'LOR', 'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', - + # Assignment - 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', + 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', - 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', + 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', - # Increment/decrement + # Increment/decrement 'PLUSPLUS', 'MINUSMINUS', # Structure dereference (->) @@ -149,18 +158,18 @@ # Conditional operator (?) 'CONDOP', - - # Delimeters + + # Delimeters 'LPAREN', 'RPAREN', # ( ) 'LBRACKET', 'RBRACKET', # [ ] - 'LBRACE', 'RBRACE', # { } + 'LBRACE', 'RBRACE', # { } 'COMMA', 'PERIOD', # . , 'SEMI', 'COLON', # ; : # Ellipsis (...) 'ELLIPSIS', - - # pre-processor + + # pre-processor 'PPHASH', # '#' ) @@ -169,18 +178,18 @@ ## ## - # valid C identifiers (K&R2: A.2.3) - identifier = r'[a-zA-Z_][0-9a-zA-Z_]*' + # valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers) + identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*' hex_prefix = '0[xX]' hex_digits = '[0-9a-fA-F]+' # integer constants (K&R2: A.2.5.1) - integer_suffix_opt = r'(u?ll|U?LL|([uU][lL])|([lL][uU])|[uU]|[lL])?' + integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?' 
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')' octal_constant = '0[0-7]*'+integer_suffix_opt hex_constant = hex_prefix+hex_digits+integer_suffix_opt - + bad_octal_constant = '0[0-7]*[89]' # character constants (K&R2: A.2.5.2) @@ -196,14 +205,14 @@ bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])""" escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))' - cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' + cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' char_const = "'"+cconst_char+"'" wchar_const = 'L'+char_const unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)" bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')""" # string literals (K&R2: A.2.6) - string_char = r"""([^"\\\n]|"""+escape_sequence+')' + string_char = r"""([^"\\\n]|"""+escape_sequence+')' string_literal = '"'+string_char+'*"' wstring_literal = 'L'+string_literal bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"' @@ -221,14 +230,14 @@ ## states = ( # ppline: preprocessor line directives - # + # ('ppline', 'exclusive'), # pppragma: pragma # ('pppragma', 'exclusive'), ) - + def t_PPHASH(self, t): r'[ \t]*\#' if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos): @@ -239,7 +248,7 @@ else: t.type = 'PPHASH' return t - + ## ## Rules for the ppline state ## @@ -261,21 +270,21 @@ def t_ppline_NEWLINE(self, t): r'\n' - + if self.pp_line is None: self._error('line number missing in #line', t) else: self.lexer.lineno = int(self.pp_line) - + if self.pp_filename is not None: self.filename = self.pp_filename - + t.lexer.begin('INITIAL') def t_ppline_PPLINE(self, t): r'line' pass - + t_ppline_ignore = ' \t' def t_ppline_error(self, t): @@ -292,7 +301,7 @@ def t_pppragma_PPPRAGMA(self, t): r'pragma' pass - + t_pppragma_ignore = ' \t<>.-{}();+-*/$%@&^~!?:,0123456789' @TOKEN(string_literal) @@ -364,17 +373,36 @@ t_RPAREN = r'\)' t_LBRACKET = r'\[' t_RBRACKET = r'\]' - t_LBRACE = r'\{' - t_RBRACE = r'\}' t_COMMA = r',' t_PERIOD = r'\.' t_SEMI = r';' t_COLON = r':' t_ELLIPSIS = r'\.\.\.' - t_STRING_LITERAL = string_literal - - # The following floating and integer constants are defined as + # Scope delimiters + # To see why on_lbrace_func is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # TT x = 5; + # Outside the function, TT is a typedef, but inside (starting and ending + # with the braces) it's a parameter. The trouble begins with yacc's + # lookahead token. If we open a new scope in brace_open, then TT has + # already been read and incorrectly interpreted as TYPEID. So, we need + # to open and close scopes from within the lexer. + # Similar for the TT immediately outside the end of the function. 
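Because CLexer now takes the two brace callbacks, any standalone use of the lexer has to supply them. A minimal sketch with throwaway callbacks (the scope list here is invented; the real parser pushes and pops its typedef scope stack from these hooks):

    from pycparser.c_lexer import CLexer

    scopes = [set()]
    clex = CLexer(
        error_func=lambda msg, line, col: None,        # ignore lex errors
        on_lbrace_func=lambda: scopes.append(set()),   # '{' opens a scope
        on_rbrace_func=lambda: scopes.pop(),           # '}' closes it
        type_lookup_func=lambda name: any(name in s for s in scopes))
    clex.build(optimize=False)
    clex.input("int main(void) { return 0; }")
    while clex.token():
        pass
    print(len(scopes))   # 1 -- the braces pushed and popped one scope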
+ # + @TOKEN(r'\{') + def t_LBRACE(self, t): + self.on_lbrace_func() + return t + @TOKEN(r'\}') + def t_RBRACE(self, t): + self.on_rbrace_func() + return t + + t_STRING_LITERAL = string_literal + + # The following floating and integer constants are defined as # functions to impose a strict order (otherwise, decimal # is placed before the others because its regex is longer, # and this is bad) @@ -404,17 +432,17 @@ def t_INT_CONST_DEC(self, t): return t - # Must come before bad_char_const, to prevent it from + # Must come before bad_char_const, to prevent it from # catching valid char constants as invalid - # + # @TOKEN(char_const) def t_CHAR_CONST(self, t): return t - + @TOKEN(wchar_const) def t_WCHAR_CONST(self, t): return t - + @TOKEN(unmatched_quote) def t_UNMATCHED_QUOTE(self, t): msg = "Unmatched '" @@ -428,12 +456,12 @@ @TOKEN(wstring_literal) def t_WSTRING_LITERAL(self, t): return t - + # unmatched string literals are caught by the preprocessor - + @TOKEN(bad_string_literal) def t_BAD_STRING_LITERAL(self, t): - msg = "String contains invalid escape code" + msg = "String contains invalid escape code" self._error(msg, t) @TOKEN(identifier) @@ -442,40 +470,8 @@ if t.type == 'ID' and self.type_lookup_func(t.value): t.type = "TYPEID" return t - + def t_error(self, t): msg = 'Illegal character %s' % repr(t.value[0]) self._error(msg, t) - -if __name__ == "__main__": - filename = '../zp.c' - text = open(filename).read() - - #~ text = '"'+r"""ka \p ka"""+'"' - text = r""" - 546 - #line 66 "kwas\df.h" - id 4 - # 5 - dsf - """ - - def errfoo(msg, a, b): - sys.write(msg + "\n") - sys.exit() - - def typelookup(namd): - return False - - clex = CLexer(errfoo, typelookup) - clex.build() - clex.input(text) - - while 1: - tok = clex.token() - if not tok: break - - printme([tok.value, tok.type, tok.lineno, clex.filename, tok.lexpos]) - - diff --git a/lib_pypy/cffi/_pycparser/c_parser.py b/lib_pypy/cffi/_pycparser/c_parser.py --- a/lib_pypy/cffi/_pycparser/c_parser.py +++ b/lib_pypy/cffi/_pycparser/c_parser.py @@ -3,7 +3,7 @@ # # CParser class: Parser and AST builder for the C language # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2013, Eli Bendersky # License: BSD #------------------------------------------------------------------------------ import re @@ -16,64 +16,66 @@ from .ast_transforms import fix_switch_cases -class CParser(PLYParser): +class CParser(PLYParser): def __init__( - self, + self, lex_optimize=True, lextab='cffi._pycparser.lextab', yacc_optimize=True, yacctab='cffi._pycparser.yacctab', yacc_debug=False): """ Create a new CParser. - + Some arguments for controlling the debug/optimization - level of the parser are provided. The defaults are - tuned for release/performance mode. + level of the parser are provided. The defaults are + tuned for release/performance mode. The simple rules for using them are: *) When tweaking CParser/CLexer, set these to False *) When releasing a stable parser, set to True - + lex_optimize: Set to False when you're modifying the lexer. Otherwise, changes in the lexer won't be used, if some lextab.py file exists. When releasing with a stable lexer, set to True - to save the re-generation of the lexer table on + to save the re-generation of the lexer table on each run. - + lextab: Points to the lex table that's used for optimized mode. 
Only if you're modifying the lexer and want - some tests to avoid re-generating the table, make + some tests to avoid re-generating the table, make this point to a local lex table file (that's been earlier generated with lex_optimize=True) - + yacc_optimize: Set to False when you're modifying the parser. Otherwise, changes in the parser won't be used, if some parsetab.py file exists. When releasing with a stable parser, set to True - to save the re-generation of the parser table on + to save the re-generation of the parser table on each run. - + yacctab: Points to the yacc table that's used for optimized - mode. Only if you're modifying the parser, make + mode. Only if you're modifying the parser, make this point to a local yacc table file - + yacc_debug: Generate a parser.out file that explains how yacc built the parsing table from the grammar. """ self.clex = CLexer( error_func=self._lex_error_func, + on_lbrace_func=self._lex_on_lbrace_func, + on_rbrace_func=self._lex_on_rbrace_func, type_lookup_func=self._lex_type_lookup_func) - + self.clex.build( optimize=lex_optimize, lextab=lextab) self.tokens = self.clex.tokens - + rules_with_opt = [ 'abstract_declarator', 'assignment_expression', @@ -89,74 +91,118 @@ 'type_qualifier_list', 'struct_declarator_list' ] - + for rule in rules_with_opt: self._create_opt_rule(rule) - + self.cparser = yacc.yacc( - module=self, + module=self, start='translation_unit_or_empty', debug=yacc_debug, optimize=yacc_optimize, tabmodule=yacctab) - - # Stack of scopes for keeping track of typedefs. _scope_stack[-1] is - # the current (topmost) scope. - # - self._scope_stack = [set()] - + + # Stack of scopes for keeping track of symbols. _scope_stack[-1] is + # the current (topmost) scope. Each scope is a dictionary that + # specifies whether a name is a type. If _scope_stack[n][name] is + # True, 'name' is currently a type in the scope. If it's False, + # 'name' is used in the scope but not as a type (for instance, if we + # saw: int name; + # If 'name' is not a key in _scope_stack[n] then 'name' was not defined + # in this scope at all. + self._scope_stack = [dict()] + + # Keeps track of the last token given to yacc (the lookahead token) + self._last_yielded_token = None + def parse(self, text, filename='', debuglevel=0): """ Parses C code and returns an AST. 
- + text: A string containing the C source code - + filename: Name of the file being parsed (for meaningful error messages) - + debuglevel: Debug level to yacc """ self.clex.filename = filename self.clex.reset_lineno() - self._scope_stack = [set()] - return self.cparser.parse(text, lexer=self.clex, debug=debuglevel) - + self._scope_stack = [dict()] + self._last_yielded_token = None + return self.cparser.parse( + input=text, + lexer=self.clex, + debug=debuglevel) + ######################-- PRIVATE --###################### - + def _push_scope(self): - self._scope_stack.append(set()) + self._scope_stack.append(dict()) def _pop_scope(self): assert len(self._scope_stack) > 1 self._scope_stack.pop() - def _add_typedef_type(self, name): - """ Add a new typedef-name to the current scope + def _add_typedef_name(self, name, coord): + """ Add a new typedef name (ie a TYPEID) to the current scope """ - self._scope_stack[-1].add(name) - #~ print(self._scope_stack) + if not self._scope_stack[-1].get(name, True): + self._parse_error( + "Typedef %r previously declared as non-typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = True + + def _add_identifier(self, name, coord): + """ Add a new object, function, or enum member name (ie an ID) to the + current scope + """ + if self._scope_stack[-1].get(name, False): + self._parse_error( + "Non-typedef %r previously declared as typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = False def _is_type_in_scope(self, name): """ Is *name* a typedef-name in the current scope? """ - return any(name in scope for scope in self._scope_stack) + for scope in reversed(self._scope_stack): + # If name is an identifier in this scope it shadows typedefs in + # higher scopes. + in_scope = scope.get(name) + if in_scope is not None: return in_scope + return False def _lex_error_func(self, msg, line, column): self._parse_error(msg, self._coord(line, column)) - + + def _lex_on_lbrace_func(self): + self._push_scope() + + def _lex_on_rbrace_func(self): + self._pop_scope() + def _lex_type_lookup_func(self, name): """ Looks up types that were previously defined with - typedef. + typedef. Passed to the lexer for recognizing identifiers that are types. """ - return self._is_type_in_scope(name) - - # To understand what's going on here, read sections A.8.5 and + is_type = self._is_type_in_scope(name) + return is_type + + def _get_yacc_lookahead_token(self): + """ We need access to yacc's lookahead token in certain cases. + This is the last token yacc requested from the lexer, so we + ask the lexer. + """ + return self.clex.last_token + + # To understand what's going on here, read sections A.8.5 and # A.8.6 of K&R2 very carefully. - # + # # A C type consists of a basic type declaration, with a list # of modifiers. For example: # @@ -166,7 +212,7 @@ # the array are the modifiers. # # Basic declarations are represented by TypeDecl (from module - # c_ast) and the modifiers are FuncDecl, PtrDecl and + # c_ast) and the modifiers are FuncDecl, PtrDecl and # ArrayDecl. 
# # The standard states that whenever a new modifier is parsed, @@ -175,41 +221,41 @@ # # K&R2 A.8.6.2: Array Declarators # - # In a declaration T D where D has the form - # D1 [constant-expression-opt] - # and the type of the identifier in the declaration T D1 is - # "type-modifier T", the type of the + # In a declaration T D where D has the form + # D1 [constant-expression-opt] + # and the type of the identifier in the declaration T D1 is + # "type-modifier T", the type of the # identifier of D is "type-modifier array of T" # # This is what this method does. The declarator it receives - # can be a list of declarators ending with TypeDecl. It - # tacks the modifier to the end of this list, just before + # can be a list of declarators ending with TypeDecl. It + # tacks the modifier to the end of this list, just before # the TypeDecl. # - # Additionally, the modifier may be a list itself. This is + # Additionally, the modifier may be a list itself. This is # useful for pointers, that can come as a chain from the rule - # p_pointer. In this case, the whole modifier list is spliced + # p_pointer. In this case, the whole modifier list is spliced # into the new location. # def _type_modify_decl(self, decl, modifier): """ Tacks a type modifier on a declarator, and returns the modified declarator. - + Note: the declarator and modifier may be modified """ #~ print '****' #~ decl.show(offset=3) #~ modifier.show(offset=3) #~ print '****' - + modifier_head = modifier modifier_tail = modifier - + # The modifier may be a nested list. Reach its tail. # - while modifier_tail.type: + while modifier_tail.type: modifier_tail = modifier_tail.type - + # If the decl is a basic type, just tack the modifier onto # it # @@ -222,29 +268,29 @@ # pointing to the underlying basic type. # decl_tail = decl - + while not isinstance(decl_tail.type, c_ast.TypeDecl): decl_tail = decl_tail.type - + modifier_tail.type = decl_tail.type decl_tail.type = modifier_head return decl # Due to the order in which declarators are constructed, # they have to be fixed in order to look like a normal AST. - # + # # When a declaration arrives from syntax construction, it has # these problems: # * The innermost TypeDecl has no type (because the basic # type is only known at the uppermost declaration level) # * The declaration has no variable name, since that is saved # in the innermost TypeDecl - # * The typename of the declaration is a list of type + # * The typename of the declaration is a list of type # specifiers, and not a node. Here, basic identifier types # should be separated from more complex types like enums # and structs. # - # This method fixes these problem. + # This method fixes these problems. # def _fix_decl_name_type(self, decl, typename): """ Fixes a declaration. Modifies decl. @@ -254,13 +300,13 @@ type = decl while not isinstance(type, c_ast.TypeDecl): type = type.type - + decl.name = type.declname type.quals = decl.quals - - # The typename is a list of types. If any type in this + + # The typename is a list of types. If any type in this # list isn't an IdentifierType, it must be the only - # type in the list (it's illegal to declare "int enum .." + # type in the list (it's illegal to declare "int enum ..") # If all the types are basic, they're collected in the # IdentifierType holder. # @@ -272,14 +318,25 @@ else: type.type = tn return decl - - # At this point, we know that typename is a list of IdentifierType - # nodes. Concatenate all the names into a single list. 
- type.type = c_ast.IdentifierType( - [name for id in typename for name in id.names], - coord=typename[0].coord) + + if not typename: + # Functions default to returning int + # + if not isinstance(decl.type, c_ast.FuncDecl): + self._parse_error( + "Missing type in declaration", decl.coord) + type.type = c_ast.IdentifierType( + ['int'], + coord=decl.coord) + else: + # At this point, we know that typename is a list of IdentifierType + # nodes. Concatenate all the names into a single list. + # + type.type = c_ast.IdentifierType( + [name for id in typename for name in id.names], + coord=typename[0].coord) return decl - + def _add_declaration_specifier(self, declspec, newspec, kind): """ Declaration specifiers are represented by a dictionary with the entries: @@ -287,31 +344,115 @@ * storage: a list of storage type qualifiers * type: a list of type specifiers * function: a list of function specifiers - - This method is given a declaration specifier, and a + + This method is given a declaration specifier, and a new specifier of a given kind. - Returns the declaration specifier, with the new + Returns the declaration specifier, with the new specifier incorporated. """ spec = declspec or dict(qual=[], storage=[], type=[], function=[]) spec[kind].insert(0, newspec) return spec - - def _build_function_definition(self, decl, spec, param_decls, body): + + def _build_declarations(self, spec, decls, typedef_namespace=False): + """ Builds a list of declarations all sharing the given specifiers. + If typedef_namespace is true, each declared name is added + to the "typedef namespace", which also includes objects, + functions, and enum constants. + """ + is_typedef = 'typedef' in spec['storage'] + declarations = [] + + # Bit-fields are allowed to be unnamed. + # + if decls[0].get('bitsize') is not None: + pass + + # When redeclaring typedef names as identifiers in inner scopes, a + # problem can occur where the identifier gets grouped into + # spec['type'], leaving decl as None. This can only occur for the + # first declarator. + # + elif decls[0]['decl'] is None: + if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \ + not self._is_type_in_scope(spec['type'][-1].names[0]): + coord = '?' + for t in spec['type']: + if hasattr(t, 'coord'): + coord = t.coord + break + self._parse_error('Invalid declaration', coord) + + # Make this look as if it came from "direct_declarator:ID" + decls[0]['decl'] = c_ast.TypeDecl( + declname=spec['type'][-1].names[0], + type=None, + quals=None, + coord=spec['type'][-1].coord) + # Remove the "new" type's name from the end of spec['type'] + del spec['type'][-1] + + # A similar problem can occur where the declaration ends up looking + # like an abstract declarator. Give it a name if this is the case. 
+ # + elif not isinstance(decls[0]['decl'], + (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): + decls_0_tail = decls[0]['decl'] + while not isinstance(decls_0_tail, c_ast.TypeDecl): + decls_0_tail = decls_0_tail.type + if decls_0_tail.declname is None: + decls_0_tail.declname = spec['type'][-1].names[0] + del spec['type'][-1] + + for decl in decls: + assert decl['decl'] is not None + if is_typedef: + declaration = c_ast.Typedef( + name=None, + quals=spec['qual'], + storage=spec['storage'], + type=decl['decl'], + coord=decl['decl'].coord) + else: + declaration = c_ast.Decl( + name=None, + quals=spec['qual'], + storage=spec['storage'], + funcspec=spec['function'], + type=decl['decl'], + init=decl.get('init'), + bitsize=decl.get('bitsize'), + coord=decl['decl'].coord) + + if isinstance(declaration.type, + (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): + fixed_decl = declaration + else: + fixed_decl = self._fix_decl_name_type(declaration, spec['type']) + + # Add the type name defined by typedef to a + # symbol table (for usage in the lexer) + # + if typedef_namespace: + if is_typedef: + self._add_typedef_name(fixed_decl.name, fixed_decl.coord) + else: + self._add_identifier(fixed_decl.name, fixed_decl.coord) + + declarations.append(fixed_decl) + + return declarations + + def _build_function_definition(self, spec, decl, param_decls, body): """ Builds a function definition. """ - declaration = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=None, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] - declaration = self._fix_decl_name_type(declaration, typename) + assert 'typedef' not in spec['storage'] + + declaration = self._build_declarations( + spec=spec, + decls=[dict(decl=decl, init=None)], + typedef_namespace=True)[0] + return c_ast.FuncDef( decl=declaration, param_decls=param_decls, @@ -361,29 +502,29 @@ p[0] = c_ast.FileAST(p[1]) def p_translation_unit_1(self, p): - """ translation_unit : external_declaration + """ translation_unit : external_declaration """ # Note: external_declaration is already a list # p[0] = p[1] - + def p_translation_unit_2(self, p): """ translation_unit : translation_unit external_declaration """ if p[2] is not None: p[1].extend(p[2]) p[0] = p[1] - + # Declarations always come as lists (because they can be - # several in one line), so we wrap the function definition - # into a list as well, to make the return value of + # several in one line), so we wrap the function definition + # into a list as well, to make the return value of # external_declaration homogenous. 
# def p_external_declaration_1(self, p): """ external_declaration : function_definition """ p[0] = [p[1]] - + def p_external_declaration_2(self, p): """ external_declaration : declaration """ @@ -393,16 +534,16 @@ """ external_declaration : pp_directive """ p[0] = p[1] - + def p_external_declaration_4(self, p): """ external_declaration : SEMI """ p[0] = None def p_pp_directive(self, p): - """ pp_directive : PPHASH + """ pp_directive : PPHASH """ - self._parse_error('Directives not supported yet', + self._parse_error('Directives not supported yet', self._coord(p.lineno(1))) # In function definitions, the declarator can be followed by @@ -411,32 +552,37 @@ def p_function_definition_1(self, p): """ function_definition : declarator declaration_list_opt compound_statement """ - # no declaration specifiers - spec = dict(qual=[], storage=[], type=[]) + # no declaration specifiers - 'int' becomes the default type + spec = dict( + qual=[], + storage=[], + type=[c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))], + function=[]) p[0] = self._build_function_definition( + spec=spec, decl=p[1], - spec=spec, param_decls=p[2], body=p[3]) - + def p_function_definition_2(self, p): """ function_definition : declaration_specifiers declarator declaration_list_opt compound_statement """ spec = p[1] p[0] = self._build_function_definition( + spec=spec, decl=p[2], - spec=spec, param_decls=p[3], body=p[4]) - + def p_statement(self, p): """ statement : labeled_statement | expression_statement | compound_statement | selection_statement - | iteration_statement + | iteration_statement | jump_statement """ p[0] = p[1] @@ -454,66 +600,43 @@ """ decl_body : declaration_specifiers init_declarator_list_opt """ spec = p[1] - is_typedef = 'typedef' in spec['storage'] - decls = [] - + # p[2] (init_declarator_list_opt) is either a list or None # if p[2] is None: - # Then it's a declaration of a struct / enum tag, - # without an actual declarator. + # By the standard, you must have at least one declarator unless + # declaring a structure tag, a union tag, or the members of an + # enumeration. # ty = spec['type'] - if len(ty) > 1: - coord = '?' - for t in ty: - if hasattr(t, 'coord'): - coord = t.coord - break - - self._parse_error('Multiple type specifiers with a type tag', - coord) - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=ty[0], - init=None, - bitsize=None, - coord=ty[0].coord) - decls = [decl] + s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum) + if len(ty) == 1 and isinstance(ty[0], s_u_or_e): + decls = [c_ast.Decl( + name=None, + quals=spec['qual'], + storage=spec['storage'], + funcspec=spec['function'], + type=ty[0], + init=None, + bitsize=None, + coord=ty[0].coord)] + + # However, this case can also occur on redeclared identifiers in + # an inner scope. The trouble is that the redeclared type's name + # gets grouped into declaration_specifiers; _build_declarations + # compensates for this. 
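# [Editorial illustration, not part of the patch above] The inner-scope case
# the comment refers to, assuming the patched parser; the C snippet is my own
# example. At file scope TT is a typedef, but inside f() "unsigned TT;"
# redeclares TT as an ordinary variable, so its name gets grouped into the
# declaration specifiers and _build_declarations has to recover it.
from pycparser import c_parser
src = "typedef char TT; void f(void) { unsigned TT; TT = 10; }"
ast = c_parser.CParser().parse(src, filename="<example>")
ast.show()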
+ # + else: + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)], + typedef_namespace=True) + else: - for decl, init in p[2] or []: - if is_typedef: - decl = c_ast.Typedef( - name=None, - quals=spec['qual'], - storage=spec['storage'], - type=decl, - coord=decl.coord) - else: - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=init, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] - fixed_decl = self._fix_decl_name_type(decl, typename) - - # Add the type name defined by typedef to a - # symbol table (for usage in the lexer) - # - if is_typedef: - self._add_typedef_type(fixed_decl.name) - - decls.append(fixed_decl) + decls = self._build_declarations( + spec=spec, + decls=p[2], + typedef_namespace=True) p[0] = decls @@ -522,7 +645,7 @@ # for defining typedefs. # # If a typedef line was directly followed by a line using the - # type defined with the typedef, the type would not be + # type defined with the typedef, the type would not be # recognized. This is because to reduce the declaration rule, # the parser's lookahead asked for the token after SEMI, which # was the type from the next line, and the lexer had no chance @@ -532,42 +655,41 @@ # the parser reduces decl_body, which actually adds the new # type into the table to be seen by the lexer before the next # line is reached. - # def p_declaration(self, p): - """ declaration : decl_body SEMI + """ declaration : decl_body SEMI """ p[0] = p[1] # Since each declaration is a list of declarations, this # rule will combine all the declarations and return a single # list - # + # def p_declaration_list(self, p): """ declaration_list : declaration | declaration_list declaration """ p[0] = p[1] if len(p) == 2 else p[1] + p[2] - + def p_declaration_specifiers_1(self, p): - """ declaration_specifiers : type_qualifier declaration_specifiers_opt + """ declaration_specifiers : type_qualifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') - + def p_declaration_specifiers_2(self, p): """ declaration_specifiers : type_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'type') - + def p_declaration_specifiers_3(self, p): """ declaration_specifiers : storage_class_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'storage') - + def p_declaration_specifiers_4(self, p): """ declaration_specifiers : function_specifier declaration_specifiers_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'function') - + def p_storage_class_specifier(self, p): """ storage_class_specifier : AUTO | REGISTER @@ -576,12 +698,12 @@ | TYPEDEF """ p[0] = p[1] - + def p_function_specifier(self, p): """ function_specifier : INLINE """ p[0] = p[1] - + def p_type_specifier_1(self, p): """ type_specifier : VOID | _BOOL @@ -603,34 +725,52 @@ | struct_or_union_specifier """ p[0] = p[1] - + def p_type_qualifier(self, p): """ type_qualifier : CONST | RESTRICT | VOLATILE """ p[0] = p[1] - - def p_init_declarator_list(self, p): + + def p_init_declarator_list_1(self, p): """ init_declarator_list : init_declarator | init_declarator_list COMMA init_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] - # Returns a (declarator, initializer) pair - # If there's no initializer, returns (declarator, None) + # If the code is declaring a variable that was declared a typedef in an + # outer scope, yacc will think the name is part of 
declaration_specifiers, + # not init_declarator, and will then get confused by EQUALS. Pass None + # up in place of declarator, and handle this at a higher level. + # + def p_init_declarator_list_2(self, p): + """ init_declarator_list : EQUALS initializer + """ + p[0] = [dict(decl=None, init=p[2])] + + # Similarly, if the code contains duplicate typedefs of, for example, + # array types, the array portion will appear as an abstract declarator. + # + def p_init_declarator_list_3(self, p): + """ init_declarator_list : abstract_declarator + """ + p[0] = [dict(decl=p[1], init=None)] + + # Returns a {decl= : init=} dictionary + # If there's no initializer, uses None # def p_init_declarator(self, p): """ init_declarator : declarator | declarator EQUALS initializer """ - p[0] = (p[1], p[3] if len(p) > 2 else None) - + p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None)) + def p_specifier_qualifier_list_1(self, p): """ specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt """ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') - + def p_specifier_qualifier_list_2(self, p): """ specifier_qualifier_list : type_specifier specifier_qualifier_list_opt """ @@ -645,8 +785,8 @@ """ klass = self._select_struct_union_class(p[1]) p[0] = klass( - name=p[2], - decls=None, + name=p[2], + decls=None, coord=self._coord(p.lineno(2))) def p_struct_or_union_specifier_2(self, p): @@ -669,7 +809,7 @@ coord=self._coord(p.lineno(2))) def p_struct_or_union(self, p): - """ struct_or_union : STRUCT + """ struct_or_union : STRUCT | UNION """ p[0] = p[1] @@ -686,59 +826,60 @@ """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI """ spec = p[1] - decls = [] - + assert 'typedef' not in spec['storage'] + if p[2] is not None: - for struct_decl in p[2]: - if struct_decl['decl'] is not None: - decl_coord = struct_decl['decl'].coord - else: - decl_coord = struct_decl['bitsize'].coord - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - funcspec=spec['function'], - storage=spec['storage'], - type=struct_decl['decl'], - init=None, - bitsize=struct_decl['bitsize'], - coord=decl_coord) - - typename = spec['type'] - decls.append(self._fix_decl_name_type(decl, typename)) - else: + decls = self._build_declarations( + spec=spec, + decls=p[2]) + + elif len(spec['type']) == 1: # Anonymous struct/union, gcc extension, C1x feature. - # Although the standard only allows structs/unions here, I see no + # Although the standard only allows structs/unions here, I see no # reason to disallow other types since some compilers have typedefs # here, and pycparser isn't about rejecting all invalid code. - # + # node = spec['type'][0] - if isinstance(node, c_ast.Node): decl_type = node else: decl_type = c_ast.IdentifierType(node) - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - funcspec=spec['function'], - storage=spec['storage'], - type=decl_type, - init=None, - bitsize=None, - coord=self._coord(p.lineno(3))) - decls.append(decl) - + + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=decl_type)]) + + else: + # Structure/union members can have the same names as typedefs. + # The trouble is that the member's name gets grouped into + # specifier_qualifier_list; _build_declarations compensates. + # + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)]) + p[0] = decls - + + def p_struct_declaration_2(self, p): + """ struct_declaration : specifier_qualifier_list abstract_declarator SEMI + """ + # "Abstract declarator?!", you ask? 
Structure members can have the + # same names as typedefs. The trouble is that the member's name gets + # grouped into specifier_qualifier_list, leaving any remainder to + # appear as an abstract declarator, as in: + # typedef int Foo; + # struct { Foo Foo[3]; }; + # + p[0] = self._build_declarations( + spec=p[1], + decls=[dict(decl=p[2], init=None)]) + def p_struct_declarator_list(self, p): """ struct_declarator_list : struct_declarator | struct_declarator_list COMMA struct_declarator """ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] - + # struct_declarator passes up a dict with the keys: decl (for # the underlying declarator) and bitsize (for the bitsize) # @@ -746,7 +887,7 @@ """ struct_declarator : declarator """ p[0] = {'decl': p[1], 'bitsize': None} - + def p_struct_declarator_2(self, p): """ struct_declarator : declarator COLON constant_expression | COLON constant_expression @@ -755,24 +896,24 @@ p[0] = {'decl': p[1], 'bitsize': p[3]} else: p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]} - + def p_enum_specifier_1(self, p): """ enum_specifier : ENUM ID | ENUM TYPEID """ p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1))) - + def p_enum_specifier_2(self, p): """ enum_specifier : ENUM brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1))) - + def p_enum_specifier_3(self, p): """ enum_specifier : ENUM ID brace_open enumerator_list brace_close | ENUM TYPEID brace_open enumerator_list brace_close """ p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1))) - + def p_enumerator_list(self, p): """ enumerator_list : enumerator | enumerator_list COMMA @@ -791,95 +932,130 @@ | ID EQUALS constant_expression """ if len(p) == 2: - p[0] = c_ast.Enumerator( - p[1], None, + enumerator = c_ast.Enumerator( + p[1], None, self._coord(p.lineno(1))) else: - p[0] = c_ast.Enumerator( - p[1], p[3], + enumerator = c_ast.Enumerator( + p[1], p[3], self._coord(p.lineno(1))) - + self._add_identifier(enumerator.name, enumerator.coord) + + p[0] = enumerator + def p_declarator_1(self, p): - """ declarator : direct_declarator + """ declarator : direct_declarator """ p[0] = p[1] - + def p_declarator_2(self, p): - """ declarator : pointer direct_declarator + """ declarator : pointer direct_declarator """ p[0] = self._type_modify_decl(p[2], p[1]) - + + # Since it's impossible for a type to be specified after a pointer, assume + # it's intended to be the name for this declaration. _add_identifier will + # raise an error if this TYPEID can't be redeclared. 
+ # + def p_declarator_3(self, p): + """ declarator : pointer TYPEID + """ + decl = c_ast.TypeDecl( + declname=p[2], + type=None, + quals=None, + coord=self._coord(p.lineno(2))) + + p[0] = self._type_modify_decl(decl, p[1]) + def p_direct_declarator_1(self, p): - """ direct_declarator : ID + """ direct_declarator : ID """ p[0] = c_ast.TypeDecl( - declname=p[1], - type=None, + declname=p[1], + type=None, quals=None, coord=self._coord(p.lineno(1))) - + def p_direct_declarator_2(self, p): - """ direct_declarator : LPAREN declarator RPAREN + """ direct_declarator : LPAREN declarator RPAREN """ p[0] = p[2] - + def p_direct_declarator_3(self, p): - """ direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET + """ direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=p[3], coord=p[1].coord) - + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) # Special for VLAs # def p_direct_declarator_4(self, p): - """ direct_declarator : direct_declarator LBRACKET TIMES RBRACKET + """ direct_declarator : direct_declarator LBRACKET TIMES RBRACKET """ arr = c_ast.ArrayDecl( type=None, dim=c_ast.ID(p[3], self._coord(p.lineno(3))), coord=p[1].coord) - + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) def p_direct_declarator_5(self, p): - """ direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN + """ direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN | direct_declarator LPAREN identifier_list_opt RPAREN """ func = c_ast.FuncDecl( args=p[3], type=None, coord=p[1].coord) - + + # To see why _get_yacc_lookahead_token is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # Outside the function, TT is a typedef, but inside (starting and + # ending with the braces) it's a parameter. The trouble begins with + # yacc's lookahead token. We don't know if we're declaring or + # defining a function until we see LBRACE, but if we wait for yacc to + # trigger a rule on that token, then TT will have already been read + # and incorrectly interpreted as TYPEID. We need to add the + # parameters to the scope the moment the lexer sees LBRACE. 
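# [Editorial illustration, not part of the patch above] The exact case quoted
# in the comment: with the lookahead handling that follows, TT is registered
# as a parameter name as soon as the lexer reaches LBRACE, so "TT = 10;"
# inside the body is read as an assignment to an identifier, not as a type.
from pycparser import c_parser
src = "typedef char TT; void foo(int TT) { TT = 10; }"
ast = c_parser.CParser().parse(src, filename="<example>")
ast.show()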
+ # + if self._get_yacc_lookahead_token().type == "LBRACE": + if func.args is not None: + for param in func.args.params: + if isinstance(param, c_ast.EllipsisParam): break + self._add_identifier(param.name, param.coord) + p[0] = self._type_modify_decl(decl=p[1], modifier=func) - + def p_pointer(self, p): """ pointer : TIMES type_qualifier_list_opt | TIMES type_qualifier_list_opt pointer """ coord = self._coord(p.lineno(1)) - + p[0] = c_ast.PtrDecl( quals=p[2] or [], type=p[3] if len(p) > 3 else None, coord=coord) - + def p_type_qualifier_list(self, p): """ type_qualifier_list : type_qualifier | type_qualifier_list type_qualifier """ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] - + def p_parameter_type_list(self, p): """ parameter_type_list : parameter_list | parameter_list COMMA ELLIPSIS """ - if len(p) > 2: + if len(p) > 2: p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3)))) - + p[0] = p[1] def p_parameter_list(self, p): @@ -896,33 +1072,43 @@ """ parameter_declaration : declaration_specifiers declarator """ spec = p[1] - decl = p[2] - - decl = c_ast.Decl( - name=None, - quals=spec['qual'], - storage=spec['storage'], - funcspec=spec['function'], - type=decl, - init=None, - bitsize=None, - coord=decl.coord) - - typename = spec['type'] or ['int'] - p[0] = self._fix_decl_name_type(decl, typename) - + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))] + p[0] = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2])])[0] + def p_parameter_declaration_2(self, p): """ parameter_declaration : declaration_specifiers abstract_declarator_opt """ spec = p[1] - decl = c_ast.Typename( - quals=spec['qual'], - type=p[2] or c_ast.TypeDecl(None, None, None), - coord=self._coord(p.lineno(2))) - - typename = spec['type'] or ['int'] - p[0] = self._fix_decl_name_type(decl, typename) - + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._coord(p.lineno(1)))] + + # Parameters can have the same names as typedefs. The trouble is that + # the parameter's name gets grouped into declaration_specifiers, making + # it look like an old-style declaration; compensate. 
+ # + if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \ + self._is_type_in_scope(spec['type'][-1].names[0]): + decl = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2], init=None)])[0] + + # This truly is an old-style parameter declaration + # + else: + decl = c_ast.Typename( + quals=spec['qual'], + type=p[2] or c_ast.TypeDecl(None, None, None), + coord=self._coord(p.lineno(2))) + typename = spec['type'] + decl = self._fix_decl_name_type(decl, typename) + + p[0] = decl + def p_identifier_list(self, p): """ identifier_list : identifier | identifier_list COMMA identifier @@ -937,7 +1123,7 @@ """ initializer : assignment_expression """ p[0] = p[1] From noreply at buildbot.pypy.org Fri Mar 7 03:31:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 03:31:38 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix test_datetime Message-ID: <20140307023138.E57631D2313@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r69770:0e92f3e927c3 Date: 2014-03-06 21:30 -0500 http://bitbucket.org/pypy/pypy/changeset/0e92f3e927c3/ Log: fix test_datetime diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1820,6 +1820,8 @@ return (self._offset, self._name) def __eq__(self, other): + if type(other) != timezone: + return False return self._offset == other._offset def __hash__(self): From noreply at buildbot.pypy.org Fri Mar 7 03:36:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 7 Mar 2014 03:36:17 +0100 (CET) Subject: [pypy-commit] pypy py3k: these pass now Message-ID: <20140307023617.B89BA1D2360@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69771:b8e86fd40eca Date: 2014-03-06 18:26 -0800 http://bitbucket.org/pypy/pypy/changeset/b8e86fd40eca/ Log: these pass now diff --git a/lib-python/3/ctypes/test/test_numbers.py b/lib-python/3/ctypes/test/test_numbers.py --- a/lib-python/3/ctypes/test/test_numbers.py +++ b/lib-python/3/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float @@ -119,7 +118,6 @@ self.assertEqual(t(2).value, 2.0) self.assertEqual(t(f).value, 2.0) - @xfail def test_integers(self): class FloatLike(object): def __float__(self): From noreply at buildbot.pypy.org Fri Mar 7 03:36:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 7 Mar 2014 03:36:19 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix: py3 is stricter about len results Message-ID: <20140307023619.25BF81D2360@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69772:a44079249698 Date: 2014-03-06 18:34 -0800 http://bitbucket.org/pypy/pypy/changeset/a44079249698/ Log: fix: py3 is stricter about len results diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -269,7 +269,7 @@ def _check_len_result(space, w_obj): # Will complain if result is too big. 
- result = space.int_w(w_obj) + result = space.int_w(w_obj, allow_conversion=False) if result < 0: raise oefmt(space.w_ValueError, "__len__() should return >= 0") return result From noreply at buildbot.pypy.org Fri Mar 7 03:36:20 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 7 Mar 2014 03:36:20 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge upstream Message-ID: <20140307023620.6AA971D2360@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69773:da301f6d84f4 Date: 2014-03-06 18:35 -0800 http://bitbucket.org/pypy/pypy/changeset/da301f6d84f4/ Log: merge upstream diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1820,6 +1820,8 @@ return (self._offset, self._name) def __eq__(self, other): + if type(other) != timezone: + return False return self._offset == other._offset def __hash__(self): From noreply at buildbot.pypy.org Fri Mar 7 03:41:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 03:41:48 +0100 (CET) Subject: [pypy-commit] pypy py3k: check datetime __format__ argument Message-ID: <20140307024148.40CA81C1007@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r69774:ca4dd62f2af0 Date: 2014-03-06 21:40 -0500 http://bitbucket.org/pypy/pypy/changeset/ca4dd62f2af0/ Log: check datetime __format__ argument diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -747,6 +747,8 @@ return _wrap_strftime(self, fmt, self.timetuple()) def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) if len(fmt) != 0: return self.strftime(fmt) return str(self) @@ -1211,6 +1213,8 @@ return _wrap_strftime(self, fmt, timetuple) def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) if len(fmt) != 0: return self.strftime(fmt) return str(self) diff --git a/lib-python/3/test/datetimetester.py b/lib-python/3/test/datetimetester.py --- a/lib-python/3/test/datetimetester.py +++ b/lib-python/3/test/datetimetester.py @@ -1131,11 +1131,13 @@ #check that this standard extension works t.strftime("%f") - def test_format(self): dt = self.theclass(2007, 9, 10) self.assertEqual(dt.__format__(''), str(dt)) + with self.assertRaisesRegex(TypeError, '^must be str, not int$'): + dt.__format__(123) + # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): @@ -1464,6 +1466,9 @@ dt = self.theclass(2007, 9, 10, 4, 5, 1, 123) self.assertEqual(dt.__format__(''), str(dt)) + with self.assertRaisesRegex(TypeError, '^must be str, not int$'): + dt.__format__(123) + # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): @@ -1775,6 +1780,7 @@ for insane in -1e200, 1e200: self.assertRaises(ValueError, self.theclass.utcfromtimestamp, insane) + @unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps") def test_negative_float_fromtimestamp(self): # The result is tz-dependent; at least test that this doesn't @@ -2153,6 +2159,9 @@ t = self.theclass(1, 2, 3, 4) self.assertEqual(t.__format__(''), str(t)) + with self.assertRaisesRegex(TypeError, '^must be str, not int$'): + t.__format__(123) + # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): @@ -3728,13 +3737,15 @@ datetime(10, 10, '10') f10 = Number(10.9) - with 
self.assertRaisesRegex(TypeError, '^nb_int should return int object$'): + with self.assertRaisesRegex(TypeError, '^nb_int should return int ' + 'object$'): datetime(10, 10, f10) class Float(float): pass s10 = Float(10.9) - with self.assertRaisesRegex(TypeError, '^integer argument expected, got float$'): + with self.assertRaisesRegex(TypeError, '^integer argument expected, ' + 'got float$'): datetime(10, 10, s10) with self.assertRaises(TypeError): From noreply at buildbot.pypy.org Fri Mar 7 07:41:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 07:41:55 +0100 (CET) Subject: [pypy-commit] cffi default: Update to 0.8.2 more officially Message-ID: <20140307064155.B5BDC1C0906@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1474:a6d2e28138cf Date: 2014-03-07 07:32 +0100 http://bitbucket.org/cffi/cffi/changeset/a6d2e28138cf/ Log: Update to 0.8.2 more officially diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5482,7 +5482,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8"); + v = PyText_FromString("0.8.2"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3197,4 +3197,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8" + assert __version__ == "0.8.2" diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -85,7 +85,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.2.tar.gz - Or grab the most current version by following the instructions below. 
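# [Editorial note, not part of the patches above] A quick manual check that
# matches what test_c.py's test_version() asserts after this bump; it assumes
# a cffi build with the C extension installed.
import _cffi_backend
assert _cffi_backend.__version__ == "0.8.2"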
diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -10,7 +10,6 @@ '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change '0.8.1': '0.8', # did not change (essentially) - '0.8.2': '0.8', # did not change } def test_version(): @@ -25,7 +24,7 @@ content = open(p).read() # v = cffi.__version__ - assert ("version = '%s'\n" % BACKEND_VERSIONS.get(v, v)) in content + assert ("version = '%s'\n" % v[:3]) in content assert ("release = '%s'\n" % v) in content def test_doc_version_file(): From noreply at buildbot.pypy.org Fri Mar 7 07:41:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 07:41:56 +0100 (CET) Subject: [pypy-commit] cffi default: Python 3 fix Message-ID: <20140307064156.CCFA51C0906@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1475:ce45900bd551 Date: 2014-03-07 07:40 +0100 http://bitbucket.org/cffi/cffi/changeset/ce45900bd551/ Log: Python 3 fix diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1563,10 +1563,10 @@ assert ffi.alignof("struct is_packed") == 1 s = ffi.new("struct is_packed[2]") s[0].b = 42623381 - s[0].a = 'X' + s[0].a = b'X' s[1].b = -4892220 - s[1].a = 'Y' + s[1].a = b'Y' assert s[0].b == 42623381 - assert s[0].a == 'X' + assert s[0].a == b'X' assert s[1].b == -4892220 - assert s[1].a == 'Y' + assert s[1].a == b'Y' From noreply at buildbot.pypy.org Fri Mar 7 07:45:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 07:45:58 +0100 (CET) Subject: [pypy-commit] cffi default: Skip a test on Windows Message-ID: <20140307064558.C747F1C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1476:c90435b15c83 Date: 2014-03-07 07:45 +0100 http://bitbucket.org/cffi/cffi/changeset/c90435b15c83/ Log: Skip a test on Windows diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3186,6 +3186,8 @@ assert alignof(BStruct) == 1 def test_packed_with_bitfields(): + if sys.platform == "win32": + py.test.skip("testing gcc behavior") BLong = new_primitive_type("long") BChar = new_primitive_type("char") BStruct = new_struct_type("struct foo") From noreply at buildbot.pypy.org Fri Mar 7 07:48:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 07:48:39 +0100 (CET) Subject: [pypy-commit] cffi default: Skip the sinf test on windows Message-ID: <20140307064839.0F9231C13AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1477:856e8185c32f Date: 2014-03-07 07:48 +0100 http://bitbucket.org/cffi/cffi/changeset/856e8185c32f/ Log: Skip the sinf test on windows diff --git a/testing/test_function.py b/testing/test_function.py --- a/testing/test_function.py +++ b/testing/test_function.py @@ -54,6 +54,8 @@ assert x == math.sin(1.23) def test_sinf(self): + if sys.platform == 'win32': + py.test.skip("no sinf found in the Windows stdlib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); From noreply at buildbot.pypy.org Fri Mar 7 07:52:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 07:52:55 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: Merge for release 0.8.2 Message-ID: <20140307065255.7B4BC1C13AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1478:a34c2b828964 Date: 2014-03-07 07:52 +0100 http://bitbucket.org/cffi/cffi/changeset/a34c2b828964/ Log: Merge for release 0.8.2 diff --git a/TODO b/TODO --- a/TODO +++ b/TODO 
@@ -7,4 +7,4 @@ an opaque type that works like a struct (so we can't get the value out of it). -_cffi backend for PyPy +accept and kill "static inline" in the cdefs diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -13,6 +13,9 @@ #include #include #include +#if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +#endif #endif #include "malloc_closure.h" @@ -832,7 +835,7 @@ return new_simple_cdata(ptrdata, ct); } else if (ct->ct_flags & CT_IS_OPAQUE) { - PyErr_Format(PyExc_TypeError, "cannot return a cdata '%s'", + PyErr_Format(PyExc_TypeError, "cdata '%s' is opaque", ct->ct_name); return NULL; } @@ -3580,9 +3583,40 @@ return cf; /* borrowed reference */ } -#define SF_MSVC_BITFIELDS 1 -#define SF_GCC_ARM_BITFIELDS 2 -#define SF_GCC_BIG_ENDIAN 4 +#define SF_MSVC_BITFIELDS 0x01 +#define SF_GCC_ARM_BITFIELDS 0x02 +#define SF_GCC_X86_BITFIELDS 0x10 + +#define SF_GCC_BIG_ENDIAN 0x04 +#define SF_GCC_LITTLE_ENDIAN 0x40 + +#define SF_PACKED 0x08 + +static int complete_sflags(int sflags) +{ + /* add one of the SF_xxx_BITFIELDS flags if none is specified */ + if (!(sflags & (SF_MSVC_BITFIELDS | SF_GCC_ARM_BITFIELDS | + SF_GCC_X86_BITFIELDS))) { +#ifdef MS_WIN32 + sflags |= SF_MSVC_BITFIELDS; +#else +# ifdef __arm__ + sflags |= SF_GCC_ARM_BITFIELDS; +# else + sflags |= SF_GCC_X86_BITFIELDS; +# endif +#endif + } + /* add one of SF_GCC_xx_ENDIAN if none is specified */ + if (!(sflags & (SF_GCC_BIG_ENDIAN | SF_GCC_LITTLE_ENDIAN))) { + int _check_endian = 1; + if (*(char *)&_check_endian == 0) + sflags |= SF_GCC_BIG_ENDIAN; + else + sflags |= SF_GCC_LITTLE_ENDIAN; + } + return sflags; +} static PyObject *b_complete_struct_or_union(PyObject *self, PyObject *args) { @@ -3594,18 +3628,7 @@ int totalalignment = -1; CFieldObject **previous; int prev_bitfield_size, prev_bitfield_free; -#ifdef MS_WIN32 - int sflags = SF_MSVC_BITFIELDS; -#else -# ifdef __arm__ - int sflags = SF_GCC_ARM_BITFIELDS; -# else int sflags = 0; -# endif - int _check_endian = 1; - if (*(char *)&_check_endian == 0) - sflags |= SF_GCC_BIG_ENDIAN; -#endif if (!PyArg_ParseTuple(args, "O!O!|Onii:complete_struct_or_union", &CTypeDescr_Type, &ct, @@ -3613,6 +3636,8 @@ &ignored, &totalsize, &totalalignment, &sflags)) return NULL; + sflags = complete_sflags(sflags); + if ((ct->ct_flags & (CT_STRUCT|CT_IS_OPAQUE)) == (CT_STRUCT|CT_IS_OPAQUE)) { is_union = 0; @@ -3668,8 +3693,8 @@ boffset = 0; /* reset each field at offset 0 */ /* update the total alignment requirement, but skip it if the - field is an anonymous bitfield */ - falign = get_alignment(ftype); + field is an anonymous bitfield or if SF_PACKED */ + falign = (sflags & SF_PACKED) ? 
1 : get_alignment(ftype); if (falign < 0) goto error; @@ -3781,6 +3806,7 @@ PyErr_Format(PyExc_TypeError, "field '%s.%s' is declared with :0", ct->ct_name, PyText_AS_UTF8(fname)); + goto error; } if (!(sflags & SF_MSVC_BITFIELDS)) { /* GCC's notion of "ftype :0;" */ @@ -3812,6 +3838,14 @@ if (bits_already_occupied + fbitsize > 8 * ftype->ct_size) { /* it would not fit, we need to start at the next allowed position */ + if ((sflags & SF_PACKED) && + (bits_already_occupied & 7)) { + PyErr_Format(PyExc_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", ct->ct_name, PyText_AS_UTF8(fname)); + goto error; + } field_offset_bytes += falign; assert(boffset < field_offset_bytes * 8); boffset = field_offset_bytes * 8; @@ -5448,7 +5482,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8"); + v = PyText_FromString("0.8.2"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/minibuffer.h b/c/minibuffer.h --- a/c/minibuffer.h +++ b/c/minibuffer.h @@ -93,6 +93,14 @@ *lenp = self->mb_size; return 1; } + +static PyObject *mb_str(MiniBufferObj *self) +{ + /* Python 2: we want str(buffer) to behave like buffer[:], because + that's what bytes(buffer) does on Python 3 and there is no way + we can prevent this. */ + return PyString_FromStringAndSize(self->mb_data, self->mb_size); +} #endif static int mb_getbuf(MiniBufferObj *self, Py_buffer *view, int flags) @@ -249,7 +257,11 @@ #endif 0, /* tp_hash */ 0, /* tp_call */ +#if PY_MAJOR_VERSION < 3 + (reprfunc)mb_str, /* tp_str */ +#else 0, /* tp_str */ +#endif PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ &mb_as_buffer, /* tp_as_buffer */ diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -370,6 +370,9 @@ assert x.load_function(BVoidP, 'strcpy') py.test.raises(KeyError, x.load_function, BVoidP, 'xxx_this_function_does_not_exist') + # the next one is from 'libm', not 'libc', but we assume + # that it is already loaded too, so it should work + assert x.load_function(BVoidP, 'sqrt') def test_hash_differences(): BChar = new_primitive_type("char") @@ -1429,8 +1432,10 @@ p = newp(BStructPtr, [12]) assert p.a1 == 12 e = py.test.raises(TypeError, newp, BStructPtr, [None]) - assert ("an integer is required" in str(e.value) or - "unsupported operand type for int(): 'NoneType'" in str(e.value)) #PyPy + msg = str(e.value) + assert ("an integer is required" in msg or # CPython + "unsupported operand type for int(): 'NoneType'" in msg or # old PyPys + "expected integer, got NoneType object" in msg) # newer PyPys py.test.raises(TypeError, 'p.a1 = "def"') if sys.version_info < (3,): BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt) @@ -2154,7 +2159,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- @@ -2886,7 +2897,7 @@ ('b1', BInt, 9), ('b2', BUInt, 7), ('c', BChar, -1)], -1, -1, -1, flag) - if flag % 2 == 0: # gcc, any variant + if not (flag & SF_MSVC_BITFIELDS): # gcc, any variant assert typeoffsetof(BStruct, 'c') == (BChar, 3) assert sizeof(BStruct) == 4 else: # 
msvc @@ -2901,20 +2912,20 @@ p.c = b'\x9D' raw = buffer(p)[:] if sys.byteorder == 'little': - if flag == 0 or flag == 2: # gcc, little endian + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A7\xC7\x9D' - elif flag == 1: # msvc - assert raw == b'A\x00\x00\x007\xC7\x00\x00\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\xE3\x9B\x9D' else: raise AssertionError("bad flag") else: - if flag == 0 or flag == 2: # gcc + if flag & SF_MSVC_BITFIELDS: + assert raw == b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' + elif flag & SF_GCC_LITTLE_ENDIAN: assert raw == b'A\xC77\x9D' - elif flag == 1: # msvc - assert raw == b'A\x00\x00\x00\x00\x00\xC77\x9D\x00\x00\x00' - elif flag == 4: # gcc, big endian + elif flag & SF_GCC_BIG_ENDIAN: assert raw == b'A\x9B\xE3\x9D' else: raise AssertionError("bad flag") @@ -2924,18 +2935,15 @@ ('', BShort, 9), ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) - if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 + elif flag & SF_GCC_X86_BITFIELDS: assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc + elif flag & SF_GCC_ARM_BITFIELDS: assert sizeof(BStruct) == 6 assert alignof(BStruct) == 2 - elif flag == 2: # gcc ARM - assert sizeof(BStruct) == 6 - assert alignof(BStruct) == 2 - elif flag == 4: # gcc, big endian - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 else: raise AssertionError("bad flag") # @@ -2944,37 +2952,43 @@ ('', BInt, 0), ('', BInt, 0), ('c', BChar, -1)], -1, -1, -1, flag) - if flag == 0: # gcc + if flag & SF_MSVC_BITFIELDS: + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 + assert alignof(BStruct) == 1 + elif flag & SF_GCC_X86_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 - elif flag == 1: # msvc - assert typeoffsetof(BStruct, 'c') == (BChar, 1) - assert sizeof(BStruct) == 2 - assert alignof(BStruct) == 1 - elif flag == 2: # gcc ARM + elif flag & SF_GCC_ARM_BITFIELDS: assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 8 assert alignof(BStruct) == 4 - elif flag == 4: # gcc, big endian - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 else: raise AssertionError("bad flag") -def test_bitfield_as_gcc(): - _test_bitfield_details(flag=0) +SF_MSVC_BITFIELDS = 0x01 +SF_GCC_ARM_BITFIELDS = 0x02 +SF_GCC_X86_BITFIELDS = 0x10 + +SF_GCC_BIG_ENDIAN = 0x04 +SF_GCC_LITTLE_ENDIAN = 0x40 + +SF_PACKED = 0x08 + +def test_bitfield_as_x86_gcc(): + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_msvc(): - _test_bitfield_details(flag=1) + _test_bitfield_details(flag=SF_MSVC_BITFIELDS|SF_GCC_LITTLE_ENDIAN) def test_bitfield_as_arm_gcc(): - _test_bitfield_details(flag=2) + _test_bitfield_details(flag=SF_GCC_ARM_BITFIELDS|SF_GCC_LITTLE_ENDIAN) -def test_bitfield_as_big_endian(): - _test_bitfield_details(flag=4) +def test_bitfield_as_ppc_gcc(): + # PowerPC uses the same format as X86, but is big-endian + _test_bitfield_details(flag=SF_GCC_X86_BITFIELDS|SF_GCC_BIG_ENDIAN) def test_struct_array_no_length(): @@ -3136,7 +3150,53 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def 
test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, SF_PACKED) + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 + +def test_packed_with_bitfields(): + if sys.platform == "win32": + py.test.skip("testing gcc behavior") + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, SF_PACKED) def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8" + assert __version__ == "0.8.2" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.1" -__version_info__ = (0, 8, 1) +__version__ = "0.8.2" +__version_info__ = (0, 8, 2) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -1,4 +1,4 @@ -import types +import sys, types from .lock import allocate_lock try: @@ -88,18 +88,20 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. """ if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override) + self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -387,22 +389,27 @@ return self._backend.from_handle(x) -def _make_ffi_library(ffi, libname, flags): - import os - name = libname +def _load_backend_lib(backend, name, flags): if name is None: - name = 'c' # on Posix only - backend = ffi._backend + if sys.platform != "win32": + return backend.load_library(None, flags) + name = "c" # Windows: load_library(None) fails, but this works + # (backward compatibility hack only) try: if '.' 
not in name and '/' not in name: raise OSError("library not found: %r" % (name,)) - backendlib = backend.load_library(name, flags) + return backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: raise # propagate the original OSError - backendlib = backend.load_library(path, flags) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + import os + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) copied_enums = [] # def make_accessor_locked(name): diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -720,7 +720,7 @@ return self._new_struct_or_union('union', name, ctypes.Union) def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, sflags=0): if totalsize >= 0 or totalalignment >= 0: raise NotImplementedError("the ctypes backend of CFFI does not support " "structures completed by verify(); please " @@ -739,6 +739,8 @@ else: cfields.append((fname, BField._ctype, bitsize)) bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 struct_or_union._fields_ = cfields CTypesStructOrUnion._bfield_types = bfield_types # diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -98,6 +98,7 @@ self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._override = False + self._packed = False def _parse(self, csource): csource, macros = _preprocess(csource) @@ -147,13 +148,16 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False): + def parse(self, csource, override=False, packed=False): prev_override = self._override + prev_packed = self._packed try: self._override = override + self._packed = packed self._internal_parse(csource) finally: self._override = prev_override + self._packed = prev_packed def _internal_parse(self, csource): ast, macros = self._parse(csource) @@ -476,6 +480,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) + tp.packed = self._packed return tp def _make_partial(self, tp, nested): diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -1,4 +1,6 @@ +import types import weakref + from .lock import allocate_lock @@ -81,29 +83,29 @@ 'long': 'i', 'long long': 'i', 'signed char': 'i', - 'unsigned char': 'u', - 'unsigned short': 'u', - 'unsigned int': 'u', - 'unsigned long': 'u', - 'unsigned long long': 'u', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', - '_Bool': 'u', + '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', - 'uint8_t': 'u', + 'uint8_t': 'i', 'int16_t': 'i', - 'uint16_t': 'u', + 'uint16_t': 'i', 'int32_t': 'i', - 'uint32_t': 'u', + 'uint32_t': 'i', 'int64_t': 'i', - 'uint64_t': 'u', + 'uint64_t': 'i', 'intptr_t': 'i', - 'uintptr_t': 'u', + 'uintptr_t': 'i', 'ptrdiff_t': 'i', - 'size_t': 'u', + 'size_t': 'i', 'ssize_t': 'i', } @@ -114,12 +116,8 @@ def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_signed_type(self): + def is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_unsigned_type(self): - return 
self.ALL_PRIMITIVE_TYPES[self.name] == 'u' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] in 'iu' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' @@ -259,6 +257,7 @@ fixedlayout = None completed = False partial = False + packed = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -315,7 +314,11 @@ fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - ffi._backend.complete_struct_or_union(BType, lst, self) + sflags = 0 + if self.packed: + sflags = 8 # SF_PACKED + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, sflags) # else: fldtypes = [] @@ -468,8 +471,7 @@ # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class level if ffi._backend # is some instance. - ModuleType = type(weakref) - if isinstance(ffi._backend, ModuleType): + if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -214,10 +214,7 @@ extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': - if tp.is_signed_type(): - converter = '_cffi_to_c_SIGNED' - else: - converter = '_cffi_to_c_UNSIGNED' + converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) @@ -270,10 +267,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type(): - if tp.is_signed_type(): - return '_cffi_from_c_SIGNED(%s, %s)' % (var, tp.name) - else: - return '_cffi_from_c_UNSIGNED(%s, %s)' % (var, tp.name) + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -801,25 +795,23 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble -#define _cffi_from_c_SIGNED(x, type) \ - (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x)) -#define _cffi_from_c_UNSIGNED(x, type) \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ + sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ + PyLong_FromUnsignedLongLong(x)) \ + : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ + PyLong_FromLongLong(x))) -#define _cffi_to_c_SIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_i8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_i16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_i32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_i64(o) : \ - (Py_FatalError("unsupported size for type " #type), 0)) -#define _cffi_to_c_UNSIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_u8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_u16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_u32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_u64(o) : \ +#define _cffi_to_c_int(o, type) \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ + : _cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ + : _cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? 
_cffi_to_c_u32(o) \ + : _cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ + : _cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -905,11 +897,13 @@ if (c_api_object == NULL) return; if (!PyCapsule_CheckExact(c_api_object)) { + Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); return; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + Py_DECREF(c_api_object); } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.1' +release = '0.8.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -85,13 +85,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.2.tar.gz - Or grab the most current version by following the instructions below. - - MD5: 1a877bf113bfe90fdefedbf9e39310d2 + - MD5: ... - - SHA: d46b7cf92956fa01d9f8e0a8d3c7e2005ae40893 + - SHA: ... * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` @@ -851,6 +851,14 @@ ``ffi`` normally caches the string ``"int[]"`` to not need to re-parse it all the time. +.. versionadded:: 0.9 + The ``ffi.cdef()`` call takes an optional argument ``packed``: if + True, then all structs declared within this cdef are "packed". This + has a meaning similar to ``__attribute__((packed))`` in GCC. It + specifies that all structure fields should have an alignment of one + byte. (Note that the packed attribute has no effect on bit fields so + far, which mean that they may be packed differently than on GCC.) + Python 3 support ---------------- @@ -1172,7 +1180,7 @@ because these objects' API changes too much across Python versions. Instead it has the following Python API (a subset of ``buffer``): -- ``buf[:]``: fetch a copy as a regular byte string (or +- ``buf[:]`` or ``bytes(buf)``: fetch a copy as a regular byte string (or ``buf[start:end]`` for a part) - ``buf[:] = newstr``: change the original content (or ``buf[start:end] @@ -1187,6 +1195,14 @@ owned memory will not be freed as long as the buffer is alive. Moreover buffer objects now support weakrefs to them. +.. versionchanged:: 0.9 + Before version 0.9, ``bytes(buf)`` was supported in Python 3 to get + the content of the buffer, but on Python 2 it would return the repr + ``<_cffi_backend.buffer object>``. This has been fixed. But you + should avoid using ``str(buf)``: it now gives inconsistent results + between Python 2 and Python 3 (this is similar to how ``str()`` + gives inconsistent results on regular byte strings). + ``ffi.typeof("C type" or cdata object)``: return an object of type ```` corresponding to the parsed string, or to the C type of the @@ -1257,13 +1273,19 @@ ``void *`` that contains an opaque reference to ``python_object``. You can pass it around to C functions or store it into C structures. Later, you can use ``ffi.from_handle(p)`` to retrive the original -``python_object`` from a value with the same ``void *`` pointer. 
The -cdata object returned by ``new_handle()`` has *ownership*, in the same -sense as ``ffi.new()`` or ``ffi.gc()``: the association ``void * -> -python_object`` is only valid as long as *this* exact cdata returned by -``new_handle()`` is alive. *Calling ffi.from_handle(p) is invalid and -will likely crash if the cdata object returned by new_handle() is not -kept alive!* *New in version 0.7.* +``python_object`` from a value with the same ``void *`` pointer. +*Calling ffi.from_handle(p) is invalid and will likely crash if +the cdata object returned by new_handle() is not kept alive!* +*New in version 0.7.* + +Note that ``from_handle()`` conceptually works like this: it searches in +the list of cdata objects made by ``new_handle()`` the one which has got +the same ``void *`` value; and then it fetches in that cdata object the +corresponding Python object. The cdata object keeps the Python object +alive, similar to how ``ffi.new()`` returns a cdata object that keeps a +piece of memory alive. If the cdata object *itself* is not alive any +more, then the association ``void * -> python_object`` is dead and +``from_handle()`` will crash. .. "versionadded:: 0.7" --- inlined in the previous paragraph diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,6 +42,11 @@ resultlist[:] = res def ask_supports_thread(): + if sys.platform == "darwin": + sys.stderr.write("OS/X: confusion between 'cc' versus 'gcc'") + sys.stderr.write(" (see issue 123)\n") + sys.stderr.write("will not use '__thread' in the C code\n") + return import distutils.errors from distutils.ccompiler import new_compiler compiler = new_compiler(force=1) @@ -91,11 +96,23 @@ if __name__ == '__main__': - from setuptools import setup, Feature, Extension - setup( - name='cffi', - description='Foreign Function Interface for Python calling C code.', - long_description=""" + from setuptools import setup, Extension + ext_modules = [] + if '__pypy__' not in sys.modules: + ext_modules.append(Extension( + name='_cffi_backend', + include_dirs=include_dirs, + sources=sources, + libraries=libraries, + define_macros=define_macros, + library_dirs=library_dirs, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + )) + setup( + name='cffi', + description='Foreign Function Interface for Python calling C code.', + long_description=""" CFFI ==== @@ -106,36 +123,29 @@ ------- `Mailing list `_ - """, - version='0.8.1', - packages=['cffi'], - zip_safe=False, +""", + version='0.8.2', + packages=['cffi'], + zip_safe=False, - url='http://cffi.readthedocs.org', - author='Armin Rigo, Maciej Fijalkowski', - author_email='python-cffi at googlegroups.com', + url='http://cffi.readthedocs.org', + author='Armin Rigo, Maciej Fijalkowski', + author_email='python-cffi at googlegroups.com', - license='MIT', + license='MIT', - features={ - 'cextension': Feature( - "fast c backend for cpython", - standard='__pypy__' not in sys.modules, - ext_modules=[ - Extension(name='_cffi_backend', - include_dirs=include_dirs, - sources=sources, - libraries=libraries, - define_macros=define_macros, - library_dirs=library_dirs, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - ), - ], - ), - }, + ext_modules=ext_modules, - install_requires=[ - 'pycparser', - ] - ) + install_requires=[ + 'pycparser', + ], + classifiers=[ + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 
'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + ], + ) diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1,4 +1,5 @@ import py +import platform import sys, ctypes from cffi import FFI, CDefError from testing.support import * @@ -755,6 +756,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff + elif platform.machine() == 'aarch64': # 4 bytes, unsigned + assert int(p) == 0xffffffff else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') @@ -1549,3 +1552,21 @@ ffi2.include(ffi1) p = ffi2.new("foo_p", [142]) assert p.x == 142 + + def test_struct_packed(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct nonpacked { char a; int b; };") + ffi.cdef("struct is_packed { char a; int b; };", packed=True) + assert ffi.sizeof("struct nonpacked") == 8 + assert ffi.sizeof("struct is_packed") == 5 + assert ffi.alignof("struct nonpacked") == 4 + assert ffi.alignof("struct is_packed") == 1 + s = ffi.new("struct is_packed[2]") + s[0].b = 42623381 + s[0].a = b'X' + s[1].b = -4892220 + s[1].a = b'Y' + assert s[0].b == 42623381 + assert s[0].a == b'X' + assert s[1].b == -4892220 + assert s[1].a == b'Y' diff --git a/testing/test_function.py b/testing/test_function.py --- a/testing/test_function.py +++ b/testing/test_function.py @@ -34,6 +34,12 @@ def getvalue(self): return self._value +lib_m = 'm' +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' class TestFunction(object): Backend = CTypesBackend @@ -43,18 +49,18 @@ ffi.cdef(""" double sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x == math.sin(1.23) def test_sinf(self): if sys.platform == 'win32': - py.test.skip("no 'sinf'") + py.test.skip("no sinf found in the Windows stdlib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sinf(1.23) assert type(x) is float assert x != math.sin(1.23) # rounding effects @@ -66,14 +72,14 @@ ffi.cdef(""" void sin(double x); """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) x = m.sin(1.23) assert x is None def test_dlopen_filename(self): - path = ctypes.util.find_library("m") + path = ctypes.util.find_library(lib_m) if not path: - py.test.skip("libm not found") + py.test.skip("%s not found" % lib_m) ffi = FFI(backend=self.Backend()) ffi.cdef(""" double cos(double x); @@ -91,7 +97,7 @@ ffi.cdef(""" double cos(double x); """) - m = ffi.dlopen("m", ffi.RTLD_LAZY | ffi.RTLD_LOCAL) + m = ffi.dlopen(lib_m, ffi.RTLD_LAZY | ffi.RTLD_LOCAL) x = m.cos(1.23) assert x == math.cos(1.23) @@ -250,22 +256,14 @@ py.test.skip("probably no symbol 'stdout' in the lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" - int puts(const char *); - void *stdout, *stderr; + void *stdout; """) - ffi.C = ffi.dlopen(None) - pout = ffi.C.stdout - perr = ffi.C.stderr - assert repr(pout).startswith("' % (ptrtype, length)) @@ -59,7 +60,7 @@ return ', '.join([str(y) + str(x) for x, y, z in self.fields]) class FakeLibrary(object): - + def load_function(self, BType, name): return FakeFunction(BType, name) @@ -69,11 +70,17 @@ self.BType = str(BType) self.name = name +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import 
distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' def test_simple(): ffi = FFI(backend=FakeBackend()) ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin # should be a callable on real backends assert func.name == 'sin' assert func.BType == '), , False>' @@ -147,7 +154,7 @@ x, double/*several*//*comment*/y) /*on the same line*/ ; """) - m = ffi.dlopen("m") + m = ffi.dlopen(lib_m) func = m.sin assert func.name == 'sin' assert func.BType == ', ), , False>' diff --git a/testing/test_unicode_literals.py b/testing/test_unicode_literals.py --- a/testing/test_unicode_literals.py +++ b/testing/test_unicode_literals.py @@ -10,6 +10,13 @@ import sys, math from cffi import FFI +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' + def test_cast(): ffi = FFI() @@ -55,7 +62,7 @@ def test_dlopen(): ffi = FFI() ffi.cdef("double sin(double x);") - m = ffi.dlopen("m") # unicode literal + m = ffi.dlopen(lib_m) # unicode literal x = m.sin(1.23) assert x == math.sin(1.23) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -4,7 +4,12 @@ from testing.support import * +lib_m = ['m'] if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = ['msvcrt'] pass # no obvious -Werror equivalent on MSVC else: if (sys.platform == 'darwin' and @@ -63,13 +68,13 @@ def test_simple_case(): ffi = FFI() ffi.cdef("double sin(double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) def test_rounding_1(): ffi = FFI() ffi.cdef("float sin(double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -77,7 +82,7 @@ def test_rounding_2(): ffi = FFI() ffi.cdef("double sin(float x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=lib_m) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -103,7 +108,7 @@ def test_longdouble(): ffi = FFI() ffi.cdef("long double sinl(long double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=lib_m) for input in [1.23, ffi.cast("double", 1.23), ffi.cast("long double", 1.23)]: @@ -148,28 +153,27 @@ all_primitive_types = model.PrimitiveType.ALL_PRIMITIVE_TYPES -all_signed_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'i') -all_unsigned_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'u') +all_integer_types = sorted(tp for tp in all_primitive_types + if all_primitive_types[tp] == 'i') all_float_types = sorted(tp for tp in all_primitive_types if all_primitive_types[tp] == 'f') +def all_signed_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) < 0] + +def all_unsigned_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) > 0] + + def test_primitive_category(): for typename in all_primitive_types: tp = model.PrimitiveType(typename) C = tp.is_char_type() - U = 
tp.is_unsigned_type() - S = tp.is_signed_type() F = tp.is_float_type() I = tp.is_integer_type() assert C == (typename in ('char', 'wchar_t')) - assert U == (typename.startswith('unsigned ') or - typename == '_Bool' or typename == 'size_t' or - typename == 'uintptr_t' or typename.startswith('uint')) assert F == (typename in ('float', 'double', 'long double')) - assert S + U + F + C == 1 # one and only one of them is true - assert I == (S or U) + assert I + F + C == 1 # one and only one of them is true def test_all_integer_and_float_types(): typenames = [] @@ -207,7 +211,7 @@ def test_var_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -226,7 +230,7 @@ def test_var_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -247,7 +251,7 @@ def test_fn_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -267,7 +271,7 @@ def test_fn_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -464,11 +468,12 @@ def test_struct_float_vs_int(): if sys.platform == 'win32': py.test.skip("XXX fixme: only gives warnings") - for typename in all_signed_integer_types: + ffi = FFI() + for typename in all_signed_integer_types(ffi): for real in all_float_types: _check_field_match(typename, real, expect_mismatch=True) for typename in all_float_types: - for real in all_signed_integer_types: + for real in all_signed_integer_types(ffi): _check_field_match(typename, real, expect_mismatch=True) def test_struct_array_field(): @@ -1133,6 +1138,9 @@ xxx def test_opaque_integer_as_function_result(): + import platform + if platform.machine().startswith('sparc'): + py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( ffi = FFI() @@ -1855,3 +1863,24 @@ def test_various_calls_libffi(): _test_various_calls(force_libffi=True) + +def test_ptr_to_opaque(): + ffi = FFI() + ffi.cdef("typedef ... 
foo_t; int f1(foo_t*); foo_t *f2(int);") + lib = ffi.verify(""" + #include + typedef struct { int x; } foo_t; + int f1(foo_t* p) { + int x = p->x; + free(p); + return x; + } + foo_t *f2(int x) { + foo_t *p = malloc(sizeof(foo_t)); + p->x = x; + return p; + } + """) + p = lib.f2(42) + x = lib.f1(p) + assert x == 42 diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -9,7 +9,7 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change - '0.8.1': '0.8', # did not change + '0.8.1': '0.8', # did not change (essentially) } def test_version(): @@ -24,7 +24,7 @@ content = open(p).read() # v = cffi.__version__ - assert ("version = '%s'\n" % BACKEND_VERSIONS.get(v, v)) in content + assert ("version = '%s'\n" % v[:3]) in content assert ("release = '%s'\n" % v) in content def test_doc_version_file(): diff --git a/testing/test_zdistutils.py b/testing/test_zdistutils.py --- a/testing/test_zdistutils.py +++ b/testing/test_zdistutils.py @@ -7,6 +7,13 @@ class DistUtilsTest(object): + def setup_class(self): + self.lib_m = "m" + if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + self.lib_m = 'msvcrt' def test_locate_engine_class(self): cls = _locate_engine_class(FFI(), self.generic) @@ -25,7 +32,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) v.write_source() with open(v.sourcefilename, 'r') as f: data = f.read() @@ -35,7 +43,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) v.sourcefilename = filename = str(udir.join('write_source.c')) v.write_source() assert filename == v.sourcefilename @@ -47,7 +56,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) try: from StringIO import StringIO except ImportError: @@ -60,7 +70,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) v.compile_module() assert v.get_module_name().startswith('_cffi_') if v.generates_python_module(): @@ -71,7 +82,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!2*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) basename = self.__class__.__name__ + 'test_compile_module' v.modulefilename = filename = str(udir.join(basename + '.so')) v.compile_module() @@ -87,7 +99,8 @@ ffi = FFI() ffi.cdef("%s sin(double x);" % csrc) v = Verifier(ffi, "#include ", - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=[self.lib_m]) names.append(v.get_module_name()) assert names[0] == names[1] != names[2] @@ -104,7 +117,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = 
'/*hi there %s!3*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -114,7 +128,8 @@ csrc = '/*hi there %s!4*/#include "test_verifier_args.h"\n' % self udir.join('test_verifier_args.h').write('#include \n') v = Verifier(ffi, csrc, include_dirs=[str(udir)], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=[self.lib_m]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -122,7 +137,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = "/*6%s*/\n#include " % self - lib = ffi.verify(csrc, force_generic_engine=self.generic) + lib = ffi.verify(csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) assert isinstance(ffi.verifier, Verifier) with open(ffi.verifier.sourcefilename, 'r') as f: @@ -139,7 +155,8 @@ #endif ''' lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=[self.lib_m]) assert lib.sin(12.3) == math.sin(12.3) v = ffi.verifier ext = v.get_extension() @@ -152,7 +169,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there9!%s*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=[self.lib_m]) assert not os.path.exists(v.sourcefilename) v.get_extension() assert os.path.exists(v.sourcefilename) From noreply at buildbot.pypy.org Fri Mar 7 07:55:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 07:55:07 +0100 (CET) Subject: [pypy-commit] cffi release-0.8: Update MD5/SHA Message-ID: <20140307065507.A5CFF1C13AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1479:0687b2a8852a Date: 2014-03-07 07:54 +0100 http://bitbucket.org/cffi/cffi/changeset/0687b2a8852a/ Log: Update MD5/SHA diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -89,9 +89,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 37fc88c62f40d04e8a18192433f951ec - - SHA: ... + - SHA: 75a6c433664a7a38d4d03cecbdc72cef4c3cceac * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Fri Mar 7 08:05:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 08:05:22 +0100 (CET) Subject: [pypy-commit] cffi default: We're not at 0.9 yet :-) Only 0.8.2. Message-ID: <20140307070522.CABEC1C13AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1480:0a3126efe878 Date: 2014-03-07 08:05 +0100 http://bitbucket.org/cffi/cffi/changeset/0a3126efe878/ Log: We're not at 0.9 yet :-) Only 0.8.2. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -851,7 +851,7 @@ ``ffi`` normally caches the string ``"int[]"`` to not need to re-parse it all the time. -.. versionadded:: 0.9 +.. versionadded:: 0.8.2 The ``ffi.cdef()`` call takes an optional argument ``packed``: if True, then all structs declared within this cdef are "packed". This has a meaning similar to ``__attribute__((packed))`` in GCC. It @@ -1195,8 +1195,8 @@ owned memory will not be freed as long as the buffer is alive. 
Moreover buffer objects now support weakrefs to them. -.. versionchanged:: 0.9 - Before version 0.9, ``bytes(buf)`` was supported in Python 3 to get +.. versionchanged:: 0.8.2 + Before version 0.8.2, ``bytes(buf)`` was supported in Python 3 to get the content of the buffer, but on Python 2 it would return the repr ``<_cffi_backend.buffer object>``. This has been fixed. But you should avoid using ``str(buf)``: it now gives inconsistent results From noreply at buildbot.pypy.org Fri Mar 7 10:04:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 10:04:14 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Tweaks and comments Message-ID: <20140307090414.816CB1D2732@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69775:515386df236f Date: 2014-03-07 10:03 +0100 http://bitbucket.org/pypy/pypy/changeset/515386df236f/ Log: Tweaks and comments diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -593,14 +593,15 @@ return result; } -static inline timespec_add(struct timespec *t, unsigned long long incr) +static inline timespec_add(struct timespec *t, long incr) { - unsigned long long nsec = t->tv_nsec + incr; + long nsec = t->tv_nsec + incr; if (nsec >= 1000000000) { - t->tv_sec += (nsec / 1000000000); - nsec %= 1000000000; + t->tv_sec += 1; + nsec -= 1000000000; + assert(nsec < 1000000000); } - t->tv_nsec = (long)nsec; + t->tv_nsec = nsec; } static inline void _acquire_gil_or_wait_for_fastgil_to_be_nonzero(void) @@ -613,25 +614,25 @@ * before doing an external C call, the generated assembler sets this global variable to an in-stack pointer to its - ASM_FRAMEDATA_HEAD structure (for asmgcc) or to 1 (for + ASM_FRAMEDATA structure (for asmgcc) or to 1 (for shadowstack, when implemented) * afterwards, it uses an atomic instruction to get the current value stored in the variable and to replace it with zero - * if the old value was still the ASM_FRAMEDATA_HEAD pointer of + * if the old value was still the ASM_FRAMEDATA pointer of this thread, everything is fine * otherwise, someone else stole the GIL. The assembler calls a - helper. This helper first needs to unlink this thread's - ASM_FRAMEDATA_HEAD from the chained list where it was put by + helper. This helper will need (as the last step) to unlink this + thread's ASM_FRAMEDATA from the chained list where it was put by the stealing code. If the old value was zero, it means that the stealing code was this function here. In that case, the helper needs to call RPyGilAcquire() again. If, on the other - hand, the old value is another ASM_FRAMEDATA_HEAD from a + hand, the old value is another ASM_FRAMEDATA from a different thread, it means we just stole the fast GIL from this other thread. In that case we store that different - ASM_FRAMEDATA_HEAD into the chained list and return immediately. + ASM_FRAMEDATA into the chained list and return immediately. This function is a balancing act inspired by CPython 2.7's threading.py for _Condition.wait() (not the PyPy version, which @@ -641,9 +642,10 @@ the real GIL is released, we won't ever see the fast GIL being 1. The scheme here sleeps very little at first, and longer as time goes on. Eventually, the real GIL should be released, so there - is no point in trying to bound the maximal length of the wait. 
+ is little point in trying to bound the maximal length of the wait, + but we do it anyway to avoid bad surprizes in corner cases. */ - unsigned long long delay = 0; + long delay = 400000; /* in ns; initial delay is 0.4 ms */ struct timespec t; while (1) { @@ -671,15 +673,14 @@ } /* sleep for a bit of time */ - if (delay == 0) { - clock_gettime(CLOCK_REALTIME, &t); - delay = 100000; /* in ns; initial delay is 0.1 ms */ - } + clock_gettime(CLOCK_REALTIME, &t); timespec_add(&t, delay); int error = pthread_mutex_timedlock(&mutex_gil, &t); if (error == ETIMEDOUT) { delay = (delay * 3) / 2; + if (delay > 50000000) + delay = 50000000; /* maximum delay 50 ms */ continue; } else { @@ -712,3 +713,6 @@ assert_has_the_gil(); _debug_print("RPyGilAcquire\n"); } + +XXX even without a gil, we need to check at least for a RPY_FASTGIL_VARNAME +that is not null, in callbacks From noreply at buildbot.pypy.org Fri Mar 7 10:20:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 10:20:42 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/a6d2e28138cf Message-ID: <20140307092042.24A861D27A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69776:e1d28ad4292c Date: 2014-03-07 07:33 +0100 http://bitbucket.org/pypy/pypy/changeset/e1d28ad4292c/ Log: Update to cffi/a6d2e28138cf diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.8")', + '__version__': 'space.wrap("0.8.2")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3186,4 +3186,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8" + assert __version__ == "0.8.2" From noreply at buildbot.pypy.org Fri Mar 7 10:20:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 10:20:43 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/0a3126efe878 Message-ID: <20140307092043.7F3A71D27A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69777:483c8263dbff Date: 2014-03-07 10:17 +0100 http://bitbucket.org/pypy/pypy/changeset/483c8263dbff/ Log: Update to cffi/0a3126efe878 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3175,6 +3175,8 @@ assert alignof(BStruct) == 1 def test_packed_with_bitfields(): + if sys.platform == "win32": + py.test.skip("testing gcc behavior") BLong = new_primitive_type("long") BChar = new_primitive_type("char") BStruct = new_struct_type("struct foo") From noreply at buildbot.pypy.org Fri Mar 7 10:20:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 10:20:44 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140307092044.D63531D27A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69778:ac3ce8b66c72 Date: 2014-03-07 10:19 +0100 http://bitbucket.org/pypy/pypy/changeset/ac3ce8b66c72/ Log: merge heads diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -66,7 +66,7 @@ return 
_DAYS_IN_MONTH[month] def _days_before_month(year, month): - "year, month -> number of days in year preceeding first day of month." + "year, month -> number of days in year preceding first day of month." assert 1 <= month <= 12, 'month must be in 1..12' return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year)) @@ -251,7 +251,7 @@ def _check_utc_offset(name, offset): assert name in ("utcoffset", "dst") if offset is None: - return None + return if not isinstance(offset, timedelta): raise TypeError("tzinfo.%s() must return None " "or timedelta, not '%s'" % (name, type(offset))) @@ -497,8 +497,7 @@ # secondsfrac isn't referenced again if isinstance(microseconds, float): - microseconds += usdouble - microseconds = _round(microseconds) + microseconds = _round(microseconds + usdouble) seconds, microseconds = divmod(microseconds, 1000000) days, seconds = divmod(seconds, 24*3600) d += days @@ -510,8 +509,7 @@ days, seconds = divmod(seconds, 24*3600) d += days s += int(seconds) - microseconds += usdouble - microseconds = _round(microseconds) + microseconds = _round(microseconds + usdouble) assert isinstance(s, int) assert isinstance(microseconds, int) assert abs(s) <= 3 * 24 * 3600 @@ -1140,7 +1138,8 @@ self = object.__new__(cls) self.__setstate(hour, minute or None) return self - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) self = object.__new__(cls) self._hour = hour @@ -1444,7 +1443,8 @@ self.__setstate(year, month) return self year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) self = object.__new__(cls) self._year = year diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -43,7 +43,7 @@ try: iwtd, owtd, ewtd = select.select([readend], [], [], 0) assert iwtd == owtd == ewtd == [] - writeend.send('X') + writeend.send(b'X') iwtd, owtd, ewtd = select.select([readend], [], []) assert iwtd == [readend] assert owtd == ewtd == [] @@ -84,7 +84,7 @@ if owtd == []: break assert owtd == [writeend] - total_out += writeend.send('x' * 512) + total_out += writeend.send(b'x' * 512) total_in = 0 while True: iwtd, owtd, ewtd = select.select([readend], [], [], 0) @@ -94,7 +94,7 @@ assert iwtd == [readend] data = readend.recv(4096) assert len(data) > 0 - assert data == 'x' * len(data) + assert data == b'x' * len(data) total_in += len(data) assert total_in == total_out finally: @@ -110,7 +110,7 @@ readend, writeend = self.getpair() try: try: - total_out = writeend.send('x' * 512) + total_out = writeend.send(b'x' * 512) finally: # win32 sends the 'closed' event immediately, even when # more data is available @@ -126,7 +126,7 @@ data = readend.recv(4096) if len(data) == 0: break - assert data == 'x' * len(data) + assert data == b'x' * len(data) total_in += len(data) # win32: check that closing the socket exits the loop if sys.platform == 'win32' and total_in == total_out: @@ -171,12 +171,12 @@ for i in range(50): n = (i*3) % 10 - writeends[n].send('X') + writeends[n].send(b'X') iwtd, owtd, ewtd = select.select(readends, [], []) assert iwtd == [readends[n]] assert owtd == ewtd == [] data = 
readends[n].recv(1) - assert data == 'X' + assert data == b'X' finally: for fd in readends + writeends: @@ -251,34 +251,30 @@ "usemodules": ["select", "_socket", "rctime", "thread"], } - def setup_class(cls): - space = cls.space - w_import = space.getattr(space.builtin, space.wrap("__import__")) - w_socketmod = space.call_function(w_import, space.wrap("socket")) - cls.w_sock = cls.space.call_method(w_socketmod, "socket") - cls.w_sock_err = space.getattr(w_socketmod, space.wrap("error")) - - try_ports = [1023] + range(20000, 30000, 437) + def w_make_server(self): + import socket + if hasattr(self, 'sock'): + return self.sock + self.sock = socket.socket() + try_ports = [1023] + list(range(20000, 30000, 437)) for port in try_ports: - print 'binding to port %d:' % (port,), - cls.w_sockaddress = space.wrap(('127.0.0.1', port)) + print('binding to port %d:' % (port,)) + self.sockaddress = ('127.0.0.1', port) try: - space.call_method(cls.w_sock, "bind", cls.w_sockaddress) + self.sock.bind(self.sockaddress) break - except OperationError, e: # should get a "Permission denied" - if not e.match(space, space.getattr(w_socketmod, space.wrap("error"))): - raise - print e.errorstr(space) - except cls.w_sock_err, e: # should get a "Permission denied" - print e + except socket.error as e: # should get a "Permission denied" + print(e) else: - raise e + raise(e) def w_getpair(self): """Helper method which returns a pair of connected sockets.""" import socket import thread + self.make_server() + self.sock.listen(1) s2 = socket.socket() thread.start_new_thread(s2.connect, (self.sockaddress,)) diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py --- a/pypy/module/test_lib_pypy/test_datetime.py +++ b/pypy/module/test_lib_pypy/test_datetime.py @@ -17,6 +17,14 @@ datetime.tzinfo()]: raises(AttributeError, 'x.abc = 1') +def test_timedelta_init_long(): + td = datetime.timedelta(microseconds=20000000000000000000) + assert td.days == 231481481 + assert td.seconds == 41600 + td = datetime.timedelta(microseconds=20000000000000000000.) + assert td.days == 231481481 + assert td.seconds == 41600 + def test_unpickle(): e = raises(TypeError, datetime.date, '123') assert e.value.args[0] == 'an integer is required' From noreply at buildbot.pypy.org Fri Mar 7 13:17:01 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 7 Mar 2014 13:17:01 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Rename type_ -> type. Message-ID: <20140307121701.E5D871C13AD@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69779:7ca743680431 Date: 2014-03-07 13:05 +0100 http://bitbucket.org/pypy/pypy/changeset/7ca743680431/ Log: Rename type_ -> type. 
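[Editor's note, not part of the commit: the large mechanical diff that follows renames the ``type_`` attribute to ``type`` throughout genllvm.py. The rename is easiest to read in the small operand-wrapper classes it touches, whose ``.T`` / ``.V`` / ``.TV`` properties feed the LLVM-IR format strings used by the code emitter. The sketch below is an illustration only: ``ToyType`` is a made-up stand-in for the real genllvm Type classes, while ``VariableRepr`` mirrors the post-rename class shown in the diff.]

    # Minimal sketch of the operand-repr pattern, assuming a toy type class.
    class ToyType(object):
        def __init__(self, typestr):
            self.typestr = typestr
        def repr_type(self):
            # the real Type classes compute this from the lltype
            return self.typestr

    class VariableRepr(object):
        # same shape as the class in the diff below, after the rename
        def __init__(self, type, name):
            self.type = type
            self.name = name
        @property
        def T(self):
            return self.type.repr_type()          # "i64"
        @property
        def V(self):
            return self.name                      # "%tmp1"
        @property
        def TV(self):
            return '{} {}'.format(self.type.repr_type(), self.name)

    result = VariableRepr(ToyType('i64'), '%tmp1')
    ptr = VariableRepr(ToyType('i64*'), '%p')
    # the emitter builds IR lines by splicing T/V/TV into format strings:
    line = '{result.V} = load {result.T}* {ptr.V}'.format(result=result, ptr=ptr)
    assert line == '%tmp1 = load i64* %p'

[End of editor's note.]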
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -173,7 +173,7 @@ ', '.join(indices), SIGNED_TYPE) elif isinstance(value, llgroup.GroupMemberOffset): grpptr = get_repr(value.grpptr) - grpptr.type_.to.write_group(grpptr.value._obj) + grpptr.type.to.write_group(grpptr.value._obj) member = get_repr(value.member) return ('ptrtoint({member.T} getelementptr({grpptr.T} null, ' '{} 0, i32 {value.index}) to {})' @@ -209,9 +209,9 @@ if not indices: indices.append('i64 0') if isinstance(offset, llmemory.FieldOffset): - type_ = database.get_type(offset.TYPE) + type = database.get_type(offset.TYPE) indices.append('i32 {}'.format( - type_.fldnames_wo_voids.index(offset.fldname))) + type.fldnames_wo_voids.index(offset.fldname))) return offset.TYPE._flds[offset.fldname] if isinstance(offset, llmemory.ArrayLengthOffset): if offset.TYPE._gckind == 'gc': @@ -347,10 +347,10 @@ llmemory.Address: LLVMAddress } -for type_ in rffi.NUMBER_TYPES + [lltype.Char, lltype.UniChar]: - if type_ not in PRIMITIVES: - PRIMITIVES[type_] = IntegralType(rffi.sizeof(type_) * 8, - rffi.is_unsigned(type_)) +for type in rffi.NUMBER_TYPES + [lltype.Char, lltype.UniChar]: + if type not in PRIMITIVES: + PRIMITIVES[type] = IntegralType(rffi.sizeof(type) * 8, + rffi.is_unsigned(type)) LLVMSigned = PRIMITIVES[lltype.Signed] SIGNED_TYPE = LLVMSigned.repr_type() LLVMHalfWord = PRIMITIVES[llgroup.HALFWORD] @@ -364,8 +364,8 @@ if to is not None: self.to = to - def setup_from_lltype(self, db, type_): - self.to = db.get_type(type_.TO) + def setup_from_lltype(self, db, type): + self.to = db.get_type(type.TO) def repr_type(self, extra_len=None): return self.to.repr_type() + '*' @@ -423,14 +423,14 @@ self.varsize = fields[-1][0].varsize self.size_variants = {} - def setup_from_lltype(self, db, type_): - if (type_._hints.get('typeptr', False) and + def setup_from_lltype(self, db, type): + if (type._hints.get('typeptr', False) and db.genllvm.translator.config.translation.gcremovetypeptr): - self.setup('%' + type_._name, [], True) + self.setup('%' + type._name, [], True) return - fields = ((db.get_type(type_._flds[f]), f) for f in type_._names) - is_gc = type_._gckind == 'gc' and type_._first_struct() == (None, None) - self.setup('%' + type_._name, fields, is_gc) + fields = ((db.get_type(type._flds[f]), f) for f in type._names) + is_gc = type._gckind == 'gc' and type._first_struct() == (None, None) + self.setup('%' + type._name, fields, is_gc) def repr_type(self, extra_len=None): if extra_len not in self.size_variants: @@ -497,8 +497,8 @@ self.of = of self.length = length - def setup_from_lltype(self, db, type_): - self.setup(db.get_type(type_.OF), getattr(type_, 'length', None)) + def setup_from_lltype(self, db, type): + self.setup(db.get_type(type.OF), getattr(type, 'length', None)) def repr_type(self, extra_len=None): if self.of is LLVMVoid: @@ -548,7 +548,7 @@ self.of.repr_type_and_value(item) for item in items)) def add_indices(self, gep, key): - if key.type_ is LLVMVoid: + if key.type is LLVMVoid: index = int(key.value[4:]) else: index = key.V @@ -575,8 +575,8 @@ fields = [(LLVMSigned, 'len'), (self.bare_array_type, 'items')] self.struct_type.setup('%array_of_' + of.repr_of_type(), fields, is_gc) - def setup_from_lltype(self, db, type_): - self.setup(db.get_type(type_.OF), type_._gckind == 'gc') + def setup_from_lltype(self, db, type): + self.setup(db.get_type(type.OF), type._gckind == 'gc') def repr_type(self, 
extra_len=None): return self.struct_type.repr_type(extra_len) @@ -612,8 +612,8 @@ def __init__(self): self.written = None - def setup_from_lltype(self, db, type_): - self.typestr = '%group_' + type_.name + def setup_from_lltype(self, db, type): + self.typestr = '%group_' + type.name def repr_ref(self, ptr_type, obj): ptr_type.refs[obj] = '@group_' + obj.name @@ -641,9 +641,9 @@ class FuncType(Type): - def setup_from_lltype(self, db, type_): - self.result = db.get_type(type_.RESULT) - self.args = [db.get_type(argtype) for argtype in type_.ARGS + def setup_from_lltype(self, db, type): + self.result = db.get_type(type.RESULT) + self.args = [db.get_type(argtype) for argtype in type.ARGS if argtype is not lltype.Void] def repr_type(self, extra_len=None): @@ -686,7 +686,7 @@ class OpaqueType(Type): typestr = '{}' - def setup_from_lltype(self, db, type_): + def setup_from_lltype(self, db, type): pass def repr_of_type(self): @@ -726,23 +726,23 @@ self.hashes = [] self.stack_bottoms = [] - def get_type(self, type_): + def get_type(self, type): try: - return self.types[type_] + return self.types[type] except KeyError: - if isinstance(type_, lltype.Typedef): - return self.get_type(type_.OF) - elif (isinstance(type_, lltype.Array) and - type_._hints.get('nolength', False)): + if isinstance(type, lltype.Typedef): + return self.get_type(type.OF) + elif (isinstance(type, lltype.Array) and + type._hints.get('nolength', False)): class_ = BareArrayType - elif type_ is lltype.RuntimeTypeInfo: + elif type is lltype.RuntimeTypeInfo: class_ = self.genllvm.gcpolicy.RttiType else: - class_ = _LL_TO_LLVM[type(type_)] - self.types[type_] = ret = class_() - ret.setup_from_lltype(self, type_) + class_ = _LL_TO_LLVM[type.__class__] + self.types[type] = ret = class_() + ret.setup_from_lltype(self, type) if ret.is_gc: - _llvm_needs_header[type_] = database.genllvm.gcpolicy \ + _llvm_needs_header[type] = database.genllvm.gcpolicy \ .get_gc_fields_lltype() # hint for ll2ctypes return ret @@ -759,70 +759,70 @@ OPS = { } -for type_ in ['int', 'uint', 'llong', 'ullong', 'lllong']: - OPS[type_ + '_lshift'] = 'shl' - OPS[type_ + '_rshift'] = 'lshr' if type_[0] == 'u' else 'ashr' - OPS[type_ + '_add'] = 'add' if type_[0] == 'u' else 'add nsw' - OPS[type_ + '_sub'] = 'sub' if type_[0] == 'u' else 'sub nsw' - OPS[type_ + '_mul'] = 'mul' if type_[0] == 'u' else 'mul nsw' - OPS[type_ + '_floordiv'] = 'udiv' if type_[0] == 'u' else 'sdiv' - OPS[type_ + '_mod'] = 'urem' if type_[0] == 'u' else 'srem' +for type in ['int', 'uint', 'llong', 'ullong', 'lllong']: + OPS[type + '_lshift'] = 'shl' + OPS[type + '_rshift'] = 'lshr' if type[0] == 'u' else 'ashr' + OPS[type + '_add'] = 'add' if type[0] == 'u' else 'add nsw' + OPS[type + '_sub'] = 'sub' if type[0] == 'u' else 'sub nsw' + OPS[type + '_mul'] = 'mul' if type[0] == 'u' else 'mul nsw' + OPS[type + '_floordiv'] = 'udiv' if type[0] == 'u' else 'sdiv' + OPS[type + '_mod'] = 'urem' if type[0] == 'u' else 'srem' for op in ['and', 'or', 'xor']: - OPS['{}_{}'.format(type_, op)] = op + OPS['{}_{}'.format(type, op)] = op -for type_ in ['float']: +for type in ['float']: for op in ['add', 'sub', 'mul', 'div']: if op == 'div': - OPS['{}_truediv'.format(type_)] = 'f' + op + OPS['{}_truediv'.format(type)] = 'f' + op else: - OPS['{}_{}'.format(type_, op)] = 'f' + op + OPS['{}_{}'.format(type, op)] = 'f' + op -for type_, prefix in [('char', 'u'), ('unichar', 'u'), ('int', 's'), +for type, prefix in [('char', 'u'), ('unichar', 'u'), ('int', 's'), ('uint', 'u'), ('llong', 's'), ('ullong', 'u'), 
('lllong', 's'), ('adr', 's'), ('ptr', 's')]: - OPS[type_ + '_eq'] = 'icmp eq' - OPS[type_ + '_ne'] = 'icmp ne' + OPS[type + '_eq'] = 'icmp eq' + OPS[type + '_ne'] = 'icmp ne' for op in ['gt', 'ge', 'lt', 'le']: - OPS['{}_{}'.format(type_, op)] = 'icmp {}{}'.format(prefix, op) + OPS['{}_{}'.format(type, op)] = 'icmp {}{}'.format(prefix, op) -for type_ in ['float']: - OPS[type_ + '_ne'] = 'fcmp une' +for type in ['float']: + OPS[type + '_ne'] = 'fcmp une' for op in ['eq', 'gt', 'ge', 'lt', 'le']: - OPS['{}_{}'.format(type_, op)] = 'fcmp o' + op + OPS['{}_{}'.format(type, op)] = 'fcmp o' + op -del type_ +del type del op class ConstantRepr(object): - def __init__(self, type_, value): - self.type_ = type_ + def __init__(self, type, value): + self.type = type self.value = value @property def T(self): - return self.type_.repr_type() + return self.type.repr_type() @property def V(self): - return self.type_.repr_value(self.value) + return self.type.repr_value(self.value) @property def TV(self): - return self.type_.repr_type_and_value(self.value) + return self.type.repr_type_and_value(self.value) def __repr__(self): - return '<{} {}>'.format(self.type_.repr_type(), self.value) + return '<{} {}>'.format(self.type.repr_type(), self.value) class VariableRepr(object): - def __init__(self, type_, name): - self.type_ = type_ + def __init__(self, type, name): + self.type = type self.name = name @property def T(self): - return self.type_.repr_type() + return self.type.repr_type() @property def V(self): @@ -830,10 +830,10 @@ @property def TV(self): - return '{} {}'.format(self.type_.repr_type(), self.name) + return '{} {}'.format(self.type.repr_type(), self.name) def __repr__(self): - return '<{} {}>'.format(self.type_.repr_type(), self.name) + return '<{} {}>'.format(self.type.repr_type(), self.name) def get_repr(cov): @@ -1008,8 +1008,8 @@ if opname in OPS: binary_op = OPS[opname] assert len(opargs) == 2 - if ((opargs[0].type_ != opargs[1].type_) and - (opargs[0].type_.bitwidth != opargs[1].type_.bitwidth) and + if ((opargs[0].type != opargs[1].type) and + (opargs[0].type.bitwidth != opargs[1].type.bitwidth) and isinstance(opargs[1], VariableRepr)): assert binary_op in ('shl', 'lshr', 'ashr') t = self._tmp() @@ -1030,8 +1030,8 @@ else: raise NotImplementedError(op) - def _tmp(self, type_=None): - return VariableRepr(type_, '%tmp{}'.format(next(self.tmp_counter))) + def _tmp(self, type=None): + return VariableRepr(type, '%tmp{}'.format(next(self.tmp_counter))) def op_llvm_gcmap(self, result): self.w('{result.V} = bitcast i8* @__gcmap to {result.T}' @@ -1046,8 +1046,8 @@ .format(**locals())) def op_llvm_stack_malloc(self, result): - type_ = result.type_.to.repr_type() - self.w('{result.V} = alloca {type_}'.format(**locals())) + type = result.type.to.repr_type() + self.w('{result.V} = alloca {type}'.format(**locals())) # TODO: implement @@ -1065,7 +1065,7 @@ def op_debug_llinterpcall(self, result, *args): self.w('call void @abort() noreturn nounwind') - if result.type_ is not LLVMVoid: + if result.type is not LLVMVoid: self.w('{result.V} = bitcast {result.T} undef to {result.T}' .format(**locals())) @@ -1112,21 +1112,21 @@ pass def _cast(self, to, fr): - if fr.type_ is LLVMVoid: + if fr.type is LLVMVoid: return - elif fr.type_ is to.type_: + elif fr.type is to.type: op = 'bitcast' - elif to.type_ is LLVMBool: - if isinstance(fr.type_, IntegralType): + elif to.type is LLVMBool: + if isinstance(fr.type, IntegralType): self.w('{to.V} = icmp ne {fr.TV}, 0'.format(**locals())) - elif isinstance(fr.type_, 
FloatType): - zer = ConstantRepr(fr.type_, 0.0) + elif isinstance(fr.type, FloatType): + zer = ConstantRepr(fr.type, 0.0) self.w('{to.V} = fcmp une {fr.TV}, {zer.V}'.format(**locals())) else: raise NotImplementedError return else: - op = fr.type_.get_cast_op(to.type_) + op = fr.type.get_cast_op(to.type) self.w('{to.V} = {op} {fr.TV} to {to.T}'.format(**locals())) op_force_cast = _cast op_raw_malloc_usage = _cast @@ -1136,10 +1136,10 @@ (fn.value._obj is None or fn.value._obj._name == 'PYPY_NO_OP')): return - it = iter(fn.type_.to.args) + it = iter(fn.type.to.args) tmp = [] for arg in args: - if arg.type_ is LLVMVoid: + if arg.type is LLVMVoid: continue argtype = next(it) if isinstance(argtype, StructType): @@ -1149,10 +1149,10 @@ tmp.append('{arg.TV}'.format(arg=arg)) args = ', '.join(tmp) - if result.type_ is LLVMVoid: + if result.type is LLVMVoid: fmt = 'call void {fn.V}({args})' - elif (isinstance(result.type_, PtrType) and - isinstance(result.type_.to, FuncType)): + elif (isinstance(result.type, PtrType) and + isinstance(result.type.to, FuncType)): fmt = '{result.V} = call {fn.TV}({args})' else: fmt = '{result.V} = call {result.T} {fn.V}({args})' @@ -1161,9 +1161,9 @@ def _get_element_ptr(self, ptr, fields, result): gep = GEP(self, ptr) - type_ = ptr.type_.to + type = ptr.type.to for field in fields: - type_ = type_.add_indices(gep, field) + type = type.add_indices(gep, field) gep.assign(result) def _get_element_ptr_op(self, result, ptr, *fields): @@ -1171,7 +1171,7 @@ op_getsubstruct = op_getarraysubstruct = _get_element_ptr_op def _get_element(self, result, var, *fields): - if result.type_ is not LLVMVoid: + if result.type is not LLVMVoid: t = self._tmp() self._get_element_ptr(var, fields, t) self.w('{result.V} = load {result.T}* {t.V}'.format(**locals())) @@ -1182,7 +1182,7 @@ def _set_element(self, result, var, *rest): fields = rest[:-1] value = rest[-1] - if value.type_ is not LLVMVoid: + if value.type is not LLVMVoid: t = self._tmp() self._get_element_ptr(var, fields, t) self.w('store {value.TV}, {value.T}* {t.V}'.format(**locals())) @@ -1191,32 +1191,32 @@ op_setarrayitem = op_bare_setarrayitem = _set_element def op_direct_fieldptr(self, result, ptr, field): - t = self._tmp(PtrType(result.type_.to.of)) + t = self._tmp(PtrType(result.type.to.of)) self._get_element_ptr(ptr, [field], t) self.w('{result.V} = bitcast {t.TV} to {result.T}'.format(**locals())) def op_direct_arrayitems(self, result, ptr): - t = self._tmp(PtrType(result.type_.to.of)) + t = self._tmp(PtrType(result.type.to.of)) self._get_element_ptr(ptr, [ConstantRepr(LLVMSigned, 0)], t) self.w('{result.V} = bitcast {t.TV} to {result.T}'.format(**locals())) def op_direct_ptradd(self, result, var, val): - t = self._tmp(PtrType(result.type_.to.of)) + t = self._tmp(PtrType(result.type.to.of)) self.w('{t.V} = getelementptr inbounds {var.TV}, i64 0, {val.TV}' .format(**locals())) self.w('{result.V} = bitcast {t.TV} to {result.T}'.format(**locals())) def op_getarraysize(self, result, ptr, *fields): gep = GEP(self, ptr) - type_ = ptr.type_.to + type = ptr.type.to for field in fields: - type_ = type_.add_indices(gep, field) + type = type.add_indices(gep, field) - if isinstance(type_, BareArrayType): - self.w('{result.V} = add {result.T} 0, {type_.length}' + if isinstance(type, BareArrayType): + self.w('{result.V} = add {result.T} 0, {type.length}' .format(**locals())) else: - if type_.is_gc: + if type.is_gc: gep.add_field_index(1) else: gep.add_field_index(0) @@ -1243,7 +1243,7 @@ def op_int_abs(self, result, var): ispos = 
self._tmp() - neg = self._tmp(var.type_) + neg = self._tmp(var.type) self.w('{ispos.V} = icmp sgt {var.TV}, -1'.format(**locals())) self.w('{neg.V} = sub {var.T} 0, {var.V}'.format(**locals())) self.w('{result.V} = select i1 {ispos.V}, {var.TV}, {neg.TV}' @@ -1306,7 +1306,7 @@ def op_float_abs(self, result, var): ispos = self._tmp() - neg = self._tmp(var.type_) + neg = self._tmp(var.type) self.w('{ispos.V} = fcmp oge {var.TV}, 0.0'.format(**locals())) self.w('{neg.V} = fsub {var.T} 0.0, {var.V}'.format(**locals())) self.w('{result.V} = select i1 {ispos.V}, {var.TV}, {neg.TV}' @@ -1332,11 +1332,11 @@ return t3 def op_raw_load(self, result, addr, offset): - addr = self._get_addr(result.type_, addr, offset) + addr = self._get_addr(result.type, addr, offset) self.w('{result.V} = load {addr.TV}'.format(**locals())) def op_raw_store(self, result, addr, offset, value): - addr = self._get_addr(value.type_, addr, offset) + addr = self._get_addr(value.type, addr, offset) self.w('store {value.TV}, {addr.TV}'.format(**locals())) op_bare_raw_store = op_raw_store @@ -1355,7 +1355,7 @@ self.w('{result.V} = icmp ne {compactoffset.TV}, 0'.format(**locals())) def op_combine_ushort(self, result, ushort, rest): - t = self._tmp(result.type_) + t = self._tmp(result.type) self.w('{t.V} = zext {ushort.TV} to {t.T}'.format(**locals())) self.w('{result.V} = or {t.TV}, {rest.V}'.format(**locals())) @@ -1426,7 +1426,7 @@ self.w('{result.V} = bitcast i1 false to i1'.format(**locals())) def op_stack_current(self, result): - if result.type_ is LLVMAddress: + if result.type is LLVMAddress: self.op_direct_call(result, get_repr(llvm_frameaddress), null_int) else: t = self._tmp(LLVMAddress) @@ -1471,12 +1471,12 @@ if isinstance(arg, Constant): self._consider_constant(arg.concretetype, arg.value) - def _consider_constant(self, type_, value): - if type_ is llmemory.Address: + def _consider_constant(self, type, value): + if type is llmemory.Address: value = value.ptr - type_ = lltype.typeOf(value) - if isinstance(type_, lltype.Ptr): - type_ = type_.TO + type = lltype.typeOf(value) + if isinstance(type, lltype.Ptr): + type = type.TO try: value = value._obj except lltype.DelayedPointer: @@ -1484,36 +1484,36 @@ return if value is None: return - if isinstance(type_, lltype.ContainerType): + if isinstance(type, lltype.ContainerType): if isinstance(value, int): return if value in self._considered_constant: return self._considered_constant.add(value) - if (isinstance(type_, lltype.Struct) and + if (isinstance(type, lltype.Struct) and not isinstance(value, lltype._subarray)): - for f in type_._names: - self._consider_constant(type_._flds[f], getattr(value, f)) - elif isinstance(type_, lltype.Array): + for f in type._names: + self._consider_constant(type._flds[f], getattr(value, f)) + elif isinstance(type, lltype.Array): if isinstance(value, _array_mixin): len_ = len(value.items) items = [value.getitem(i) for i in xrange(len_)] else: items = value.items for i in items: - self._consider_constant(type_.OF, i) - elif type_ is lltype.RuntimeTypeInfo: + self._consider_constant(type.OF, i) + elif type is lltype.RuntimeTypeInfo: if isinstance(self.gctransformer, RefcountingGCTransformer): self.gctransformer.static_deallocation_funcptr_for_type( value.about) - elif type_ is llmemory.GCREF.TO and hasattr(value, 'container'): + elif type is llmemory.GCREF.TO and hasattr(value, 'container'): self._consider_constant(value.ORIGTYPE.TO, value.container) - elif type_ is llmemory.WeakRef: + elif type is llmemory.WeakRef: from 
rpython.memory.gctypelayout import convert_weakref_to wrapper = convert_weakref_to(value._dereference()) self._consider_constant(wrapper._TYPE, wrapper) value._converted_weakref = wrapper - self.gctransformer.consider_constant(type_, value) + self.gctransformer.consider_constant(type, value) p, c = lltype.parentlink(value) if p: @@ -1584,7 +1584,7 @@ class RefcountGCPolicy(GCPolicy): class RttiType(FuncType): - def setup_from_lltype(self, db, type_): + def setup_from_lltype(self, db, type): self.result = LLVMVoid self.args = [LLVMAddress] @@ -1719,12 +1719,12 @@ ' block0:\n' ' br i1 %x, label %block1, label %block2\n' ' block1:\n' - ' call void {raise_.V}({type_.TV}, {inst.TV})\n' + ' call void {raise_.V}({type.TV}, {inst.TV})\n' ' ret void\n' ' block2:\n' ' ret void\n' '}}\n'.format(raise_=get_repr(exctrans.rpyexc_raise_ptr), - type_=get_repr(self.ovf_err[0]), + type=get_repr(self.ovf_err[0]), inst=get_repr(self.ovf_err[1]))) def gen_source(self): From noreply at buildbot.pypy.org Fri Mar 7 13:39:11 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 7 Mar 2014 13:39:11 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Don't try to parse LLVM datalayout. Message-ID: <20140307123911.6DC6B1C01F0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69780:0e5975d13007 Date: 2014-03-07 13:37 +0100 http://bitbucket.org/pypy/pypy/changeset/0e5975d13007/ Log: Don't try to parse LLVM datalayout. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -24,6 +24,7 @@ from rpython.rtyper.lltypesystem.ll2ctypes import (_llvm_needs_header, _array_mixin) from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.tool.rffi_platform import memory_alignment from rpython.rtyper.typesystem import getfunctionptr from rpython.translator.backendopt.removenoops import remove_same_as from rpython.translator.backendopt.ssa import SSI_to_SSA @@ -35,7 +36,7 @@ database = None -align = None +align = memory_alignment() class Type(object): @@ -1700,13 +1701,6 @@ self.gcpolicy._consider_constant(ovf_err_inst._T, ovf_err_inst._obj) self.gcpolicy.finish() - def _parse_datalayout(self, output): - pointer = output.index('p:') - minus = output.index('-', pointer) - tmp = output[pointer:minus].split(':') - global align - align = int(tmp[3]) / 8 - def _write_special_declarations(self, f): f.write('declare void @abort() noreturn nounwind\n') f.write('declare void @llvm.gcroot(i8** %ptrloc, i8* %metadata)\n') @@ -1736,7 +1730,6 @@ with self.main_ll_file.open('w') as f: output = cmdexec('clang -emit-llvm -S -x c {} -o -' .format(devnull)) - self._parse_datalayout(output) for line in output.splitlines(True): if line.startswith('target '): f.write(line) From noreply at buildbot.pypy.org Fri Mar 7 15:51:47 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Mar 2014 15:51:47 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: cleanup Message-ID: <20140307145147.B28221C01F0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r69781:2b3b4c7e23b4 Date: 2014-03-07 14:53 +0200 http://bitbucket.org/pypy/pypy/changeset/2b3b4c7e23b4/ Log: cleanup diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -22,7 +22,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 
'descriptor.get_dtype_cache(space).w_typeinfo', - 'nditer': 'interp_nditer.nditer', + 'nditer': 'nditer.nditer', } diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -139,50 +139,30 @@ def setitem(self, elem): self.array.setitem(self.offset, elem) -class SliceIterator(object): +class SliceIterator(ArrayIter): def __init__(self, arr, strides, backstrides, shape, order="C", backward=False, dtype=None): - self.indexes = [0] * (len(shape) - 1) - self.offset = 0 - self.arr = arr if dtype is None: dtype = arr.implementation.dtype + self.dtype = dtype if backward: self.slicesize = shape[0] self.gap = [support.product(shape[1:]) * dtype.elsize] - self.strides = strides[1:] - self.backstrides = backstrides[1:] - self.shape = shape[1:] - self.strides.reverse() - self.backstrides.reverse() - self.shape.reverse() - self.shapelen = len(self.shape) + strides = strides[1:] + backstrides = backstrides[1:] + shape = shape[1:] + strides.reverse() + backstrides.reverse() + shape.reverse() + size = support.product(shape) else: - self.shape = [support.product(shape)] - self.strides, self.backstrides = calc_strides(shape, dtype, order) + shape = [support.product(shape)] + strides, backstrides = calc_strides(shape, dtype, order) + size = 0 self.slicesize = support.product(shape) - self.shapelen = 0 - self.gap = self.strides - self.dtype = dtype - self._done = False + self.gap = strides - def done(self): - return self._done - - @jit.unroll_safe - def next(self): - offset = self.offset - for i in range(self.shapelen - 1, -1, -1): - if self.indexes[i] < self.shape[i] - 1: - self.indexes[i] += 1 - offset += self.strides[i] - break - else: - self.indexes[i] = 0 - offset -= self.backstrides[i] - else: - self._done = True - self.offset = offset + ArrayIter.__init__(self, arr.implementation, size, shape, strides, backstrides) def getslice(self): from pypy.module.micronumpy.concrete import SliceArray diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/nditer.py rename from pypy/module/micronumpy/interp_nditer.py rename to pypy/module/micronumpy/nditer.py From noreply at buildbot.pypy.org Fri Mar 7 15:51:48 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Mar 2014 15:51:48 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: cleanup, fix (almost all) -A applevel issues Message-ID: <20140307145148.F318A1C01F0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r69782:160a895c16a7 Date: 2014-03-07 16:48 +0200 http://bitbucket.org/pypy/pypy/changeset/160a895c16a7/ Log: cleanup, fix (almost all) -A applevel issues diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -145,6 +145,7 @@ if dtype is None: dtype = arr.implementation.dtype self.dtype = dtype + self.arr = arr if backward: self.slicesize = shape[0] self.gap = [support.product(shape[1:]) * dtype.elsize] @@ -158,7 +159,7 @@ else: shape = [support.product(shape)] strides, backstrides = calc_strides(shape, dtype, order) - size = 0 + size = 1 self.slicesize = support.product(shape) self.gap = strides diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -149,6 +149,8 @@ if item == 'external_loop': nditer.external_loop = True elif item == 'buffered': + 
raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer buffered not implemented yet')) nditer.buffered = True elif item == 'c_index': nditer.tracked_index = 'C' @@ -167,6 +169,8 @@ elif item == 'refs_ok': nditer.refs_ok = True elif item == 'reduce_ok': + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer reduce_ok not implemented yet')) nditer.reduce_ok = True elif item == 'zerosize_ok': nditer.zerosize_ok = True @@ -270,6 +274,9 @@ len(self.seq), parse_op_flag) if not space.is_none(w_op_axes): self.set_op_axes(space, w_op_axes) + if not space.is_none(w_op_dtypes): + raise OperationError(space.w_NotImplementedError, space.wrap( + 'nditer op_dtypes kwarg not implemented yet')) self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) if self.tracked_index != "": diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -1,14 +1,16 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest +from pypy.conftest import option def raises(*args, **kwargs): #sometimes ValueError, sometimes TypeError, but we don't really care which - exc = py.test.raises((ValueError, TypeError), *args, **kwargs) + exc = py.test.raises((ValueError, TypeError), *args[1:], **kwargs) return exc class AppTestNDIter(BaseNumpyAppTest): def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_runAppDirect = cls.space.wrap(option.runappdirect) def test_basic(self): from numpy import arange, nditer @@ -111,26 +113,38 @@ it.iternext() assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) + if not self.runAppDirect: + raises(NotImplementedError, 'it[0] = 3') + skip('nditer.__setitem__ not implmented') while not it.finished: it[0] = it.multi_index[1] - it.multi_index[0] it.iternext() assert (a == [[0, 1, 2], [-1, 0, 1]]).all() b = zeros((2, 3)) - exc = raises(nditer, b, flags=['c_index', 'external_loop']) + exc = raises(ValueError, nditer, b, flags=['c_index', 'external_loop']) assert str(exc.value).startswith("Iterator flag EXTERNAL_LOOP cannot") def test_buffered(self): from numpy import arange, nditer, array a = arange(6).reshape(2,3) r = [] + if not self.runAppDirect: + raises(NotImplementedError, "nditer(a, flags=['external_loop', 'buffered'], order='F')") + skip('nditer buffered flag not implmented') for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): r.append(x) - assert (array(r) == [0, 3, 1, 4, 2, 5]).all() + array_r = array(r) + assert len(array_r.shape) == 2 + assert array_r.shape == (1, 6) + assert (array_r == [0, 3, 1, 4, 2, 5]).all() def test_op_dtype(self): from numpy import arange, nditer, sqrt, array a = arange(6).reshape(2,3) - 3 - exc = raises(nditer, a, op_dtypes=['complex']) + if not self.runAppDirect: + raises(NotImplementedError, nditer, a, op_dtypes=['complex']) + skip('nditer op_dtypes kwarg not implemented yet') + exc = raises(ValueError, nditer, a, op_dtypes=['complex']) assert str(exc.value).startswith("Iterator operand required copying or buffering") r = [] for x in nditer(a, op_flags=['readonly','copy'], @@ -148,6 +162,9 @@ def test_casting(self): from numpy import arange, nditer a = arange(6.) 
+ if not self.runAppDirect: + raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + skip('nditer casting not implemented yet') exc = raises(ValueError, nditer, a, flags=['buffered'], op_dtypes=['float32']) assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") r = [] @@ -198,7 +215,7 @@ c = square2([1, 2, 3], out=b) assert (c == [1., 4., 9.]).all() assert (b == c).all() - exc = raises(square2, arange(6).reshape(2, 3), out=b) + exc = raises(ValueError, square2, arange(6).reshape(2, 3), out=b) assert str(exc.value).startswith('non-broadcastable output') def test_outer_product(self): @@ -217,6 +234,9 @@ from numpy import nditer, arange, array a = arange(24).reshape(2, 3, 4) b = array(0) + if not self.runAppDirect: + raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) + skip('nditer reduce_ok not implemented yet') #reduction operands must be readwrite for x, y in nditer([a, b], flags=['reduce_ok', 'external_loop'], op_flags=[['readonly'], ['readwrite']]): From noreply at buildbot.pypy.org Fri Mar 7 16:02:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 16:02:36 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: more cleanup Message-ID: <20140307150236.594911C13C0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-nditer Changeset: r69783:be3dc8958516 Date: 2014-03-07 10:01 -0500 http://bitbucket.org/pypy/pypy/changeset/be3dc8958516/ Log: more cleanup we want to condition tests on being on pypy, not on runappdirect -- they will still fail runappdirect against pypy diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -8,6 +8,7 @@ from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.concrete import SliceArray + class AbstractIterator(object): def done(self): raise NotImplementedError("Abstract Class") diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -1,17 +1,8 @@ import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.conftest import option -def raises(*args, **kwargs): - #sometimes ValueError, sometimes TypeError, but we don't really care which - exc = py.test.raises((ValueError, TypeError), *args[1:], **kwargs) - return exc class AppTestNDIter(BaseNumpyAppTest): - def setup_class(cls): - BaseNumpyAppTest.setup_class.im_func(cls) - cls.w_runAppDirect = cls.space.wrap(option.runappdirect) - def test_basic(self): from numpy import arange, nditer a = arange(6).reshape(2,3) @@ -105,6 +96,7 @@ def test_interface(self): from numpy import arange, nditer, zeros + import sys a = arange(6).reshape(2,3) r = [] it = nditer(a, flags=['f_index']) @@ -113,7 +105,7 @@ it.iternext() assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] it = nditer(a, flags=['multi_index'], op_flags=['writeonly']) - if not self.runAppDirect: + if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, 'it[0] = 3') skip('nditer.__setitem__ not implmented') while not it.finished: @@ -126,9 +118,10 @@ def test_buffered(self): from numpy import arange, nditer, array + import sys a = arange(6).reshape(2,3) r = [] - if not self.runAppDirect: + if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, "nditer(a, flags=['external_loop', 'buffered'], order='F')") 
skip('nditer buffered flag not implmented') for x in nditer(a, flags=['external_loop', 'buffered'], order='F'): @@ -140,8 +133,9 @@ def test_op_dtype(self): from numpy import arange, nditer, sqrt, array + import sys a = arange(6).reshape(2,3) - 3 - if not self.runAppDirect: + if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, nditer, a, op_dtypes=['complex']) skip('nditer op_dtypes kwarg not implemented yet') exc = raises(ValueError, nditer, a, op_dtypes=['complex']) @@ -161,8 +155,9 @@ def test_casting(self): from numpy import arange, nditer + import sys a = arange(6.) - if not self.runAppDirect: + if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) skip('nditer casting not implemented yet') exc = raises(ValueError, nditer, a, flags=['buffered'], op_dtypes=['float32']) @@ -232,9 +227,10 @@ def test_reduction(self): from numpy import nditer, arange, array + import sys a = arange(24).reshape(2, 3, 4) b = array(0) - if not self.runAppDirect: + if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, nditer, [a, b], flags=['reduce_ok']) skip('nditer reduce_ok not implemented yet') #reduction operands must be readwrite @@ -276,14 +272,12 @@ def test_multi_index(self): import numpy as np - a = np.arange(6).reshape(2, 3) - it = np.nditer(a, flags=['multi_index']) - res = [] while not it.finished: res.append((it[0], it.multi_index)) it.iternext() - - assert res == [(0, (0, 0)), (1, (0, 1)), (2, (0, 2)), (3, (1, 0)), (4, (1, 1)), (5, (1, 2))] + assert res == [(0, (0, 0)), (1, (0, 1)), + (2, (0, 2)), (3, (1, 0)), + (4, (1, 1)), (5, (1, 2))] From noreply at buildbot.pypy.org Fri Mar 7 17:52:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 17:52:00 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: wip: checksignals in _file Message-ID: <20140307165200.3B8711C0906@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69784:a4119279c94c Date: 2014-03-07 11:49 -0500 http://bitbucket.org/pypy/pypy/changeset/a4119279c94c/ Log: wip: checksignals in _file diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -109,12 +109,13 @@ # file lock. They don't convert StreamErrors to OperationErrors, too. 
@unwrap_spec(mode=str, buffering=int) - def direct___init__(self, w_name, mode='r', buffering=-1): + def direct___init__(self, space, w_name, mode='r', buffering=-1): self.direct_close() self.w_name = w_name self.check_mode_ok(mode) stream = dispatch_filename(streamio.open_file_as_stream)( - self.space, w_name, mode, buffering) + self.space, w_name, mode, buffering, + space.getexecutioncontext().checksignals) fd = stream.try_to_find_file_descriptor() self.check_not_dir(fd) self.fdopenstream(stream, fd, mode) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -76,9 +76,9 @@ @specialize.argtype(0) -def open_file_as_stream(path, mode="r", buffering=-1): +def open_file_as_stream(path, mode="r", buffering=-1, signal_checker=None): os_flags, universal, reading, writing, basemode, binary = decode_mode(mode) - stream = open_path_helper(path, os_flags, basemode == "a") + stream = open_path_helper(path, os_flags, basemode == "a", signal_checker) return construct_stream_tower(stream, buffering, universal, reading, writing, binary) @@ -95,7 +95,7 @@ writing, binary) @specialize.argtype(0) -def open_path_helper(path, os_flags, append): +def open_path_helper(path, os_flags, append, signal_checker=None): # XXX for now always return DiskFile fd = rposix.open(path, os_flags, 0666) if append: @@ -104,7 +104,7 @@ except OSError: # XXX does this pass make sense? pass - return DiskFile(fd) + return DiskFile(fd, signal_checker) def decode_mode(mode): if mode[0] == 'U': @@ -277,8 +277,9 @@ class DiskFile(Stream): """Standard I/O basis stream using os.open/close/read/write/lseek""" - def __init__(self, fd): + def __init__(self, fd, signal_checker=None): self.fd = fd + self.signal_checker = signal_checker def seek(self, offset, whence): os.lseek(self.fd, offset, whence) @@ -294,6 +295,8 @@ except OSError, e: if e.errno != errno.EINTR: raise + if self.signal_checker is not None: + self.signal_checker() # else try again def readline(self): From noreply at buildbot.pypy.org Fri Mar 7 17:59:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 17:59:41 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: another approach Message-ID: <20140307165941.78B7B1C0906@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69785:7db678d05e15 Date: 2014-03-07 11:59 -0500 http://bitbucket.org/pypy/pypy/changeset/7db678d05e15/ Log: another approach diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -3,6 +3,7 @@ import stat import errno from rpython.rlib import streamio +from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.rstring import StringBuilder from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors @@ -13,6 +14,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror + class W_File(W_AbstractStream): """An interp-level file object. 
This implements the same interface than the app-level files, with the following differences: @@ -115,7 +117,7 @@ self.check_mode_ok(mode) stream = dispatch_filename(streamio.open_file_as_stream)( self.space, w_name, mode, buffering, - space.getexecutioncontext().checksignals) + signal_checker(space)) fd = stream.try_to_find_file_descriptor() self.check_not_dir(fd) self.fdopenstream(stream, fd, mode) @@ -577,6 +579,10 @@ def getopenstreams(space): return space.fromcache(FileState).openstreams + at specialize.memo() +def signal_checker(space): + return space.getexecutioncontext().checksignals + MAYBE_EAGAIN = getattr(errno, 'EAGAIN', None) MAYBE_EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', None) From noreply at buildbot.pypy.org Fri Mar 7 18:16:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 18:16:11 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: fix signal_checker, use in streamio Message-ID: <20140307171611.A12131C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69786:cf3d6a026601 Date: 2014-03-07 12:15 -0500 http://bitbucket.org/pypy/pypy/changeset/cf3d6a026601/ Log: fix signal_checker, use in streamio diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -111,13 +111,12 @@ # file lock. They don't convert StreamErrors to OperationErrors, too. @unwrap_spec(mode=str, buffering=int) - def direct___init__(self, space, w_name, mode='r', buffering=-1): + def direct___init__(self, w_name, mode='r', buffering=-1): self.direct_close() self.w_name = w_name self.check_mode_ok(mode) stream = dispatch_filename(streamio.open_file_as_stream)( - self.space, w_name, mode, buffering, - signal_checker(space)) + self.space, w_name, mode, buffering, signal_checker(self.space)) fd = stream.try_to_find_file_descriptor() self.check_not_dir(fd) self.fdopenstream(stream, fd, mode) @@ -136,7 +135,8 @@ self.direct_close() self.w_name = self.space.wrap('') self.check_mode_ok(mode) - stream = streamio.fdopen_as_stream(fd, mode, buffering) + stream = streamio.fdopen_as_stream(fd, mode, buffering, + signal_checker(self.space)) self.fdopenstream(stream, fd, mode) def direct_close(self): @@ -581,7 +581,9 @@ @specialize.memo() def signal_checker(space): - return space.getexecutioncontext().checksignals + def checksignals(): + space.getexecutioncontext().checksignals() + return checksignals MAYBE_EAGAIN = getattr(errno, 'EAGAIN', None) MAYBE_EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', None) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -85,12 +85,12 @@ def _setfd_binary(fd): pass -def fdopen_as_stream(fd, mode, buffering=-1): +def fdopen_as_stream(fd, mode, buffering=-1, signal_checker=None): # XXX XXX XXX you want do check whether the modes are compatible # otherwise you get funny results os_flags, universal, reading, writing, basemode, binary = decode_mode(mode) _setfd_binary(fd) - stream = DiskFile(fd) + stream = DiskFile(fd, signal_checker) return construct_stream_tower(stream, buffering, universal, reading, writing, binary) @@ -309,8 +309,9 @@ except OSError, e: if e.errno != errno.EINTR: raise - else: - continue # try again + if self.signal_checker is not None: + self.signal_checker() + continue # try again if not c: break c = c[0] @@ -326,6 +327,8 @@ except OSError, e: if e.errno != errno.EINTR: raise + if self.signal_checker is not None: + 
self.signal_checker() else: data = data[n:] From noreply at buildbot.pypy.org Fri Mar 7 20:00:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 7 Mar 2014 20:00:45 +0100 (CET) Subject: [pypy-commit] pypy stdlib-2.7.6: close branch for merging Message-ID: <20140307190045.D9BE91C13AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.6 Changeset: r69787:a2a4b9ff79bd Date: 2014-03-07 13:57 -0500 http://bitbucket.org/pypy/pypy/changeset/a2a4b9ff79bd/ Log: close branch for merging From noreply at buildbot.pypy.org Fri Mar 7 20:45:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 20:45:11 +0100 (CET) Subject: [pypy-commit] pypy default: Patch by dmlockhart: support delattr() on ast node objects. Message-ID: <20140307194511.DF6811C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69790:2a86e6faaf0f Date: 2014-03-07 20:43 +0100 http://bitbucket.org/pypy/pypy/changeset/2a86e6faaf0f/ Log: Patch by dmlockhart: support delattr() on ast node objects. diff too long, truncating to 2000 out of 2419 lines diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2815,6 +2815,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 1 +def Module_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + Module_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~1 + _Module_field_unroller = unrolling_iterable(['body']) def Module_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Module, w_self) @@ -2835,7 +2841,7 @@ mod.typedef, __module__='_ast', _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Module_get_body, Module_set_body, cls=Module), + body=typedef.GetSetProperty(Module_get_body, Module_set_body, Module_del_body, cls=Module), __new__=interp2app(get_AST_new(Module)), __init__=interp2app(Module_init), ) @@ -2856,6 +2862,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 1 +def Interactive_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + Interactive_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~1 + _Interactive_field_unroller = unrolling_iterable(['body']) def Interactive_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Interactive, w_self) @@ -2876,7 +2888,7 @@ mod.typedef, __module__='_ast', _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Interactive_get_body, Interactive_set_body, cls=Interactive), + body=typedef.GetSetProperty(Interactive_get_body, Interactive_set_body, Interactive_del_body, cls=Interactive), __new__=interp2app(get_AST_new(Interactive)), __init__=interp2app(Interactive_init), ) @@ -2904,6 +2916,12 @@ w_self.deldictvalue(space, 'body') w_self.initialization_state |= 1 +def Expression_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + Expression_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~1 + _Expression_field_unroller = unrolling_iterable(['body']) def Expression_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Expression, w_self) @@ -2923,7 +2941,7 @@ mod.typedef, __module__='_ast', _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Expression_get_body, Expression_set_body, cls=Expression), + 
body=typedef.GetSetProperty(Expression_get_body, Expression_set_body, Expression_del_body, cls=Expression), __new__=interp2app(get_AST_new(Expression)), __init__=interp2app(Expression_init), ) @@ -2944,6 +2962,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 1 +def Suite_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + Suite_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~1 + _Suite_field_unroller = unrolling_iterable(['body']) def Suite_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Suite, w_self) @@ -2964,7 +2988,7 @@ mod.typedef, __module__='_ast', _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Suite_get_body, Suite_set_body, cls=Suite), + body=typedef.GetSetProperty(Suite_get_body, Suite_set_body, Suite_del_body, cls=Suite), __new__=interp2app(get_AST_new(Suite)), __init__=interp2app(Suite_init), ) @@ -2990,6 +3014,12 @@ w_self.deldictvalue(space, 'lineno') w_self.initialization_state |= 1 +def stmt_del_lineno(space, w_self): + # Check if the element exists, raise appropriate exceptions + stmt_get_lineno(space, w_self) + w_self.deldictvalue(space, 'lineno') + w_self.initialization_state &= ~1 + def stmt_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') @@ -3011,12 +3041,18 @@ w_self.deldictvalue(space, 'col_offset') w_self.initialization_state |= 2 +def stmt_del_col_offset(space, w_self): + # Check if the element exists, raise appropriate exceptions + stmt_get_col_offset(space, w_self) + w_self.deldictvalue(space, 'col_offset') + w_self.initialization_state &= ~2 + stmt.typedef = typedef.TypeDef("stmt", AST.typedef, __module__='_ast', _attributes=_FieldsWrapper(['lineno', 'col_offset']), - lineno=typedef.GetSetProperty(stmt_get_lineno, stmt_set_lineno, cls=stmt), - col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, cls=stmt), + lineno=typedef.GetSetProperty(stmt_get_lineno, stmt_set_lineno, stmt_del_lineno, cls=stmt), + col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), __new__=interp2app(get_AST_new(stmt)), ) @@ -3041,6 +3077,12 @@ w_self.deldictvalue(space, 'name') w_self.initialization_state |= 4 +def FunctionDef_del_name(space, w_self): + # Check if the element exists, raise appropriate exceptions + FunctionDef_get_name(space, w_self) + w_self.deldictvalue(space, 'name') + w_self.initialization_state &= ~4 + def FunctionDef_get_args(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'args') @@ -3062,6 +3104,12 @@ w_self.deldictvalue(space, 'args') w_self.initialization_state |= 8 +def FunctionDef_del_args(space, w_self): + # Check if the element exists, raise appropriate exceptions + FunctionDef_get_args(space, w_self) + w_self.deldictvalue(space, 'args') + w_self.initialization_state &= ~8 + def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'body') @@ -3078,6 +3126,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 16 +def FunctionDef_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + FunctionDef_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~16 + def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: raise_attriberr(space, w_self, 'decorator_list') @@ -3094,6 +3148,12 @@ 
w_self.w_decorator_list = w_new_value w_self.initialization_state |= 32 +def FunctionDef_del_decorator_list(space, w_self): + # Check if the element exists, raise appropriate exceptions + FunctionDef_get_decorator_list(space, w_self) + w_self.deldictvalue(space, 'decorator_list') + w_self.initialization_state &= ~32 + _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list']) def FunctionDef_init(space, w_self, __args__): w_self = space.descr_self_interp_w(FunctionDef, w_self) @@ -3115,10 +3175,10 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['name', 'args', 'body', 'decorator_list']), - name=typedef.GetSetProperty(FunctionDef_get_name, FunctionDef_set_name, cls=FunctionDef), - args=typedef.GetSetProperty(FunctionDef_get_args, FunctionDef_set_args, cls=FunctionDef), - body=typedef.GetSetProperty(FunctionDef_get_body, FunctionDef_set_body, cls=FunctionDef), - decorator_list=typedef.GetSetProperty(FunctionDef_get_decorator_list, FunctionDef_set_decorator_list, cls=FunctionDef), + name=typedef.GetSetProperty(FunctionDef_get_name, FunctionDef_set_name, FunctionDef_del_name, cls=FunctionDef), + args=typedef.GetSetProperty(FunctionDef_get_args, FunctionDef_set_args, FunctionDef_del_args, cls=FunctionDef), + body=typedef.GetSetProperty(FunctionDef_get_body, FunctionDef_set_body, FunctionDef_del_body, cls=FunctionDef), + decorator_list=typedef.GetSetProperty(FunctionDef_get_decorator_list, FunctionDef_set_decorator_list, FunctionDef_del_decorator_list, cls=FunctionDef), __new__=interp2app(get_AST_new(FunctionDef)), __init__=interp2app(FunctionDef_init), ) @@ -3144,6 +3204,12 @@ w_self.deldictvalue(space, 'name') w_self.initialization_state |= 4 +def ClassDef_del_name(space, w_self): + # Check if the element exists, raise appropriate exceptions + ClassDef_get_name(space, w_self) + w_self.deldictvalue(space, 'name') + w_self.initialization_state &= ~4 + def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'bases') @@ -3160,6 +3226,12 @@ w_self.w_bases = w_new_value w_self.initialization_state |= 8 +def ClassDef_del_bases(space, w_self): + # Check if the element exists, raise appropriate exceptions + ClassDef_get_bases(space, w_self) + w_self.deldictvalue(space, 'bases') + w_self.initialization_state &= ~8 + def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'body') @@ -3176,6 +3248,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 16 +def ClassDef_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + ClassDef_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~16 + def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: raise_attriberr(space, w_self, 'decorator_list') @@ -3192,6 +3270,12 @@ w_self.w_decorator_list = w_new_value w_self.initialization_state |= 32 +def ClassDef_del_decorator_list(space, w_self): + # Check if the element exists, raise appropriate exceptions + ClassDef_get_decorator_list(space, w_self) + w_self.deldictvalue(space, 'decorator_list') + w_self.initialization_state &= ~32 + _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list']) def ClassDef_init(space, w_self, __args__): w_self = space.descr_self_interp_w(ClassDef, w_self) @@ -3214,10 +3298,10 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['name', 'bases', 'body', 'decorator_list']), - 
name=typedef.GetSetProperty(ClassDef_get_name, ClassDef_set_name, cls=ClassDef), - bases=typedef.GetSetProperty(ClassDef_get_bases, ClassDef_set_bases, cls=ClassDef), - body=typedef.GetSetProperty(ClassDef_get_body, ClassDef_set_body, cls=ClassDef), - decorator_list=typedef.GetSetProperty(ClassDef_get_decorator_list, ClassDef_set_decorator_list, cls=ClassDef), + name=typedef.GetSetProperty(ClassDef_get_name, ClassDef_set_name, ClassDef_del_name, cls=ClassDef), + bases=typedef.GetSetProperty(ClassDef_get_bases, ClassDef_set_bases, ClassDef_del_bases, cls=ClassDef), + body=typedef.GetSetProperty(ClassDef_get_body, ClassDef_set_body, ClassDef_del_body, cls=ClassDef), + decorator_list=typedef.GetSetProperty(ClassDef_get_decorator_list, ClassDef_set_decorator_list, ClassDef_del_decorator_list, cls=ClassDef), __new__=interp2app(get_AST_new(ClassDef)), __init__=interp2app(ClassDef_init), ) @@ -3245,6 +3329,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 +def Return_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Return_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + _Return_field_unroller = unrolling_iterable(['value']) def Return_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Return, w_self) @@ -3264,7 +3354,7 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['value']), - value=typedef.GetSetProperty(Return_get_value, Return_set_value, cls=Return), + value=typedef.GetSetProperty(Return_get_value, Return_set_value, Return_del_value, cls=Return), __new__=interp2app(get_AST_new(Return)), __init__=interp2app(Return_init), ) @@ -3285,6 +3375,12 @@ w_self.w_targets = w_new_value w_self.initialization_state |= 4 +def Delete_del_targets(space, w_self): + # Check if the element exists, raise appropriate exceptions + Delete_get_targets(space, w_self) + w_self.deldictvalue(space, 'targets') + w_self.initialization_state &= ~4 + _Delete_field_unroller = unrolling_iterable(['targets']) def Delete_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Delete, w_self) @@ -3305,7 +3401,7 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['targets']), - targets=typedef.GetSetProperty(Delete_get_targets, Delete_set_targets, cls=Delete), + targets=typedef.GetSetProperty(Delete_get_targets, Delete_set_targets, Delete_del_targets, cls=Delete), __new__=interp2app(get_AST_new(Delete)), __init__=interp2app(Delete_init), ) @@ -3326,6 +3422,12 @@ w_self.w_targets = w_new_value w_self.initialization_state |= 4 +def Assign_del_targets(space, w_self): + # Check if the element exists, raise appropriate exceptions + Assign_get_targets(space, w_self) + w_self.deldictvalue(space, 'targets') + w_self.initialization_state &= ~4 + def Assign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') @@ -3349,6 +3451,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 8 +def Assign_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Assign_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~8 + _Assign_field_unroller = unrolling_iterable(['targets', 'value']) def Assign_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Assign, w_self) @@ -3369,8 +3477,8 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['targets', 'value']), - targets=typedef.GetSetProperty(Assign_get_targets, Assign_set_targets, 
cls=Assign), - value=typedef.GetSetProperty(Assign_get_value, Assign_set_value, cls=Assign), + targets=typedef.GetSetProperty(Assign_get_targets, Assign_set_targets, Assign_del_targets, cls=Assign), + value=typedef.GetSetProperty(Assign_get_value, Assign_set_value, Assign_del_value, cls=Assign), __new__=interp2app(get_AST_new(Assign)), __init__=interp2app(Assign_init), ) @@ -3398,6 +3506,12 @@ w_self.deldictvalue(space, 'target') w_self.initialization_state |= 4 +def AugAssign_del_target(space, w_self): + # Check if the element exists, raise appropriate exceptions + AugAssign_get_target(space, w_self) + w_self.deldictvalue(space, 'target') + w_self.initialization_state &= ~4 + def AugAssign_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') @@ -3421,6 +3535,12 @@ w_self.setdictvalue(space, 'op', w_new_value) w_self.initialization_state |= 8 +def AugAssign_del_op(space, w_self): + # Check if the element exists, raise appropriate exceptions + AugAssign_get_op(space, w_self) + w_self.deldictvalue(space, 'op') + w_self.initialization_state &= ~8 + def AugAssign_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') @@ -3444,6 +3564,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 16 +def AugAssign_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + AugAssign_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~16 + _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) def AugAssign_init(space, w_self, __args__): w_self = space.descr_self_interp_w(AugAssign, w_self) @@ -3463,9 +3589,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['target', 'op', 'value']), - target=typedef.GetSetProperty(AugAssign_get_target, AugAssign_set_target, cls=AugAssign), - op=typedef.GetSetProperty(AugAssign_get_op, AugAssign_set_op, cls=AugAssign), - value=typedef.GetSetProperty(AugAssign_get_value, AugAssign_set_value, cls=AugAssign), + target=typedef.GetSetProperty(AugAssign_get_target, AugAssign_set_target, AugAssign_del_target, cls=AugAssign), + op=typedef.GetSetProperty(AugAssign_get_op, AugAssign_set_op, AugAssign_del_op, cls=AugAssign), + value=typedef.GetSetProperty(AugAssign_get_value, AugAssign_set_value, AugAssign_del_value, cls=AugAssign), __new__=interp2app(get_AST_new(AugAssign)), __init__=interp2app(AugAssign_init), ) @@ -3493,6 +3619,12 @@ w_self.deldictvalue(space, 'dest') w_self.initialization_state |= 4 +def Print_del_dest(space, w_self): + # Check if the element exists, raise appropriate exceptions + Print_get_dest(space, w_self) + w_self.deldictvalue(space, 'dest') + w_self.initialization_state &= ~4 + def Print_get_values(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'values') @@ -3509,6 +3641,12 @@ w_self.w_values = w_new_value w_self.initialization_state |= 8 +def Print_del_values(space, w_self): + # Check if the element exists, raise appropriate exceptions + Print_get_values(space, w_self) + w_self.deldictvalue(space, 'values') + w_self.initialization_state &= ~8 + def Print_get_nl(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'nl') @@ -3530,6 +3668,12 @@ w_self.deldictvalue(space, 'nl') w_self.initialization_state |= 16 +def Print_del_nl(space, w_self): + # Check if the element exists, raise appropriate exceptions + Print_get_nl(space, w_self) + w_self.deldictvalue(space, 'nl') + 
w_self.initialization_state &= ~16 + _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl']) def Print_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Print, w_self) @@ -3550,9 +3694,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['dest', 'values', 'nl']), - dest=typedef.GetSetProperty(Print_get_dest, Print_set_dest, cls=Print), - values=typedef.GetSetProperty(Print_get_values, Print_set_values, cls=Print), - nl=typedef.GetSetProperty(Print_get_nl, Print_set_nl, cls=Print), + dest=typedef.GetSetProperty(Print_get_dest, Print_set_dest, Print_del_dest, cls=Print), + values=typedef.GetSetProperty(Print_get_values, Print_set_values, Print_del_values, cls=Print), + nl=typedef.GetSetProperty(Print_get_nl, Print_set_nl, Print_del_nl, cls=Print), __new__=interp2app(get_AST_new(Print)), __init__=interp2app(Print_init), ) @@ -3580,6 +3724,12 @@ w_self.deldictvalue(space, 'target') w_self.initialization_state |= 4 +def For_del_target(space, w_self): + # Check if the element exists, raise appropriate exceptions + For_get_target(space, w_self) + w_self.deldictvalue(space, 'target') + w_self.initialization_state &= ~4 + def For_get_iter(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'iter') @@ -3603,6 +3753,12 @@ w_self.deldictvalue(space, 'iter') w_self.initialization_state |= 8 +def For_del_iter(space, w_self): + # Check if the element exists, raise appropriate exceptions + For_get_iter(space, w_self) + w_self.deldictvalue(space, 'iter') + w_self.initialization_state &= ~8 + def For_get_body(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'body') @@ -3619,6 +3775,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 16 +def For_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + For_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~16 + def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: raise_attriberr(space, w_self, 'orelse') @@ -3635,6 +3797,12 @@ w_self.w_orelse = w_new_value w_self.initialization_state |= 32 +def For_del_orelse(space, w_self): + # Check if the element exists, raise appropriate exceptions + For_get_orelse(space, w_self) + w_self.deldictvalue(space, 'orelse') + w_self.initialization_state &= ~32 + _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse']) def For_init(space, w_self, __args__): w_self = space.descr_self_interp_w(For, w_self) @@ -3656,10 +3824,10 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['target', 'iter', 'body', 'orelse']), - target=typedef.GetSetProperty(For_get_target, For_set_target, cls=For), - iter=typedef.GetSetProperty(For_get_iter, For_set_iter, cls=For), - body=typedef.GetSetProperty(For_get_body, For_set_body, cls=For), - orelse=typedef.GetSetProperty(For_get_orelse, For_set_orelse, cls=For), + target=typedef.GetSetProperty(For_get_target, For_set_target, For_del_target, cls=For), + iter=typedef.GetSetProperty(For_get_iter, For_set_iter, For_del_iter, cls=For), + body=typedef.GetSetProperty(For_get_body, For_set_body, For_del_body, cls=For), + orelse=typedef.GetSetProperty(For_get_orelse, For_set_orelse, For_del_orelse, cls=For), __new__=interp2app(get_AST_new(For)), __init__=interp2app(For_init), ) @@ -3687,6 +3855,12 @@ w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 +def While_del_test(space, w_self): + # Check if the element exists, raise appropriate exceptions + 
While_get_test(space, w_self) + w_self.deldictvalue(space, 'test') + w_self.initialization_state &= ~4 + def While_get_body(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'body') @@ -3703,6 +3877,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 8 +def While_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + While_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~8 + def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'orelse') @@ -3719,6 +3899,12 @@ w_self.w_orelse = w_new_value w_self.initialization_state |= 16 +def While_del_orelse(space, w_self): + # Check if the element exists, raise appropriate exceptions + While_get_orelse(space, w_self) + w_self.deldictvalue(space, 'orelse') + w_self.initialization_state &= ~16 + _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def While_init(space, w_self, __args__): w_self = space.descr_self_interp_w(While, w_self) @@ -3740,9 +3926,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['test', 'body', 'orelse']), - test=typedef.GetSetProperty(While_get_test, While_set_test, cls=While), - body=typedef.GetSetProperty(While_get_body, While_set_body, cls=While), - orelse=typedef.GetSetProperty(While_get_orelse, While_set_orelse, cls=While), + test=typedef.GetSetProperty(While_get_test, While_set_test, While_del_test, cls=While), + body=typedef.GetSetProperty(While_get_body, While_set_body, While_del_body, cls=While), + orelse=typedef.GetSetProperty(While_get_orelse, While_set_orelse, While_del_orelse, cls=While), __new__=interp2app(get_AST_new(While)), __init__=interp2app(While_init), ) @@ -3770,6 +3956,12 @@ w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 +def If_del_test(space, w_self): + # Check if the element exists, raise appropriate exceptions + If_get_test(space, w_self) + w_self.deldictvalue(space, 'test') + w_self.initialization_state &= ~4 + def If_get_body(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'body') @@ -3786,6 +3978,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 8 +def If_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + If_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~8 + def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'orelse') @@ -3802,6 +4000,12 @@ w_self.w_orelse = w_new_value w_self.initialization_state |= 16 +def If_del_orelse(space, w_self): + # Check if the element exists, raise appropriate exceptions + If_get_orelse(space, w_self) + w_self.deldictvalue(space, 'orelse') + w_self.initialization_state &= ~16 + _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def If_init(space, w_self, __args__): w_self = space.descr_self_interp_w(If, w_self) @@ -3823,9 +4027,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['test', 'body', 'orelse']), - test=typedef.GetSetProperty(If_get_test, If_set_test, cls=If), - body=typedef.GetSetProperty(If_get_body, If_set_body, cls=If), - orelse=typedef.GetSetProperty(If_get_orelse, If_set_orelse, cls=If), + test=typedef.GetSetProperty(If_get_test, If_set_test, If_del_test, cls=If), + body=typedef.GetSetProperty(If_get_body, If_set_body, If_del_body, cls=If), + orelse=typedef.GetSetProperty(If_get_orelse, If_set_orelse, 
If_del_orelse, cls=If), __new__=interp2app(get_AST_new(If)), __init__=interp2app(If_init), ) @@ -3853,6 +4057,12 @@ w_self.deldictvalue(space, 'context_expr') w_self.initialization_state |= 4 +def With_del_context_expr(space, w_self): + # Check if the element exists, raise appropriate exceptions + With_get_context_expr(space, w_self) + w_self.deldictvalue(space, 'context_expr') + w_self.initialization_state &= ~4 + def With_get_optional_vars(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'optional_vars') @@ -3876,6 +4086,12 @@ w_self.deldictvalue(space, 'optional_vars') w_self.initialization_state |= 8 +def With_del_optional_vars(space, w_self): + # Check if the element exists, raise appropriate exceptions + With_get_optional_vars(space, w_self) + w_self.deldictvalue(space, 'optional_vars') + w_self.initialization_state &= ~8 + def With_get_body(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'body') @@ -3892,6 +4108,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 16 +def With_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + With_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~16 + _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body']) def With_init(space, w_self, __args__): w_self = space.descr_self_interp_w(With, w_self) @@ -3912,9 +4134,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['context_expr', 'optional_vars', 'body']), - context_expr=typedef.GetSetProperty(With_get_context_expr, With_set_context_expr, cls=With), - optional_vars=typedef.GetSetProperty(With_get_optional_vars, With_set_optional_vars, cls=With), - body=typedef.GetSetProperty(With_get_body, With_set_body, cls=With), + context_expr=typedef.GetSetProperty(With_get_context_expr, With_set_context_expr, With_del_context_expr, cls=With), + optional_vars=typedef.GetSetProperty(With_get_optional_vars, With_set_optional_vars, With_del_optional_vars, cls=With), + body=typedef.GetSetProperty(With_get_body, With_set_body, With_del_body, cls=With), __new__=interp2app(get_AST_new(With)), __init__=interp2app(With_init), ) @@ -3942,6 +4164,12 @@ w_self.deldictvalue(space, 'type') w_self.initialization_state |= 4 +def Raise_del_type(space, w_self): + # Check if the element exists, raise appropriate exceptions + Raise_get_type(space, w_self) + w_self.deldictvalue(space, 'type') + w_self.initialization_state &= ~4 + def Raise_get_inst(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'inst') @@ -3965,6 +4193,12 @@ w_self.deldictvalue(space, 'inst') w_self.initialization_state |= 8 +def Raise_del_inst(space, w_self): + # Check if the element exists, raise appropriate exceptions + Raise_get_inst(space, w_self) + w_self.deldictvalue(space, 'inst') + w_self.initialization_state &= ~8 + def Raise_get_tback(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'tback') @@ -3988,6 +4222,12 @@ w_self.deldictvalue(space, 'tback') w_self.initialization_state |= 16 +def Raise_del_tback(space, w_self): + # Check if the element exists, raise appropriate exceptions + Raise_get_tback(space, w_self) + w_self.deldictvalue(space, 'tback') + w_self.initialization_state &= ~16 + _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback']) def Raise_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Raise, w_self) @@ -4007,9 +4247,9 @@ stmt.typedef, 
__module__='_ast', _fields=_FieldsWrapper(['type', 'inst', 'tback']), - type=typedef.GetSetProperty(Raise_get_type, Raise_set_type, cls=Raise), - inst=typedef.GetSetProperty(Raise_get_inst, Raise_set_inst, cls=Raise), - tback=typedef.GetSetProperty(Raise_get_tback, Raise_set_tback, cls=Raise), + type=typedef.GetSetProperty(Raise_get_type, Raise_set_type, Raise_del_type, cls=Raise), + inst=typedef.GetSetProperty(Raise_get_inst, Raise_set_inst, Raise_del_inst, cls=Raise), + tback=typedef.GetSetProperty(Raise_get_tback, Raise_set_tback, Raise_del_tback, cls=Raise), __new__=interp2app(get_AST_new(Raise)), __init__=interp2app(Raise_init), ) @@ -4030,6 +4270,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 4 +def TryExcept_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + TryExcept_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~4 + def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'handlers') @@ -4046,6 +4292,12 @@ w_self.w_handlers = w_new_value w_self.initialization_state |= 8 +def TryExcept_del_handlers(space, w_self): + # Check if the element exists, raise appropriate exceptions + TryExcept_get_handlers(space, w_self) + w_self.deldictvalue(space, 'handlers') + w_self.initialization_state &= ~8 + def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'orelse') @@ -4062,6 +4314,12 @@ w_self.w_orelse = w_new_value w_self.initialization_state |= 16 +def TryExcept_del_orelse(space, w_self): + # Check if the element exists, raise appropriate exceptions + TryExcept_get_orelse(space, w_self) + w_self.deldictvalue(space, 'orelse') + w_self.initialization_state &= ~16 + _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse']) def TryExcept_init(space, w_self, __args__): w_self = space.descr_self_interp_w(TryExcept, w_self) @@ -4084,9 +4342,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['body', 'handlers', 'orelse']), - body=typedef.GetSetProperty(TryExcept_get_body, TryExcept_set_body, cls=TryExcept), - handlers=typedef.GetSetProperty(TryExcept_get_handlers, TryExcept_set_handlers, cls=TryExcept), - orelse=typedef.GetSetProperty(TryExcept_get_orelse, TryExcept_set_orelse, cls=TryExcept), + body=typedef.GetSetProperty(TryExcept_get_body, TryExcept_set_body, TryExcept_del_body, cls=TryExcept), + handlers=typedef.GetSetProperty(TryExcept_get_handlers, TryExcept_set_handlers, TryExcept_del_handlers, cls=TryExcept), + orelse=typedef.GetSetProperty(TryExcept_get_orelse, TryExcept_set_orelse, TryExcept_del_orelse, cls=TryExcept), __new__=interp2app(get_AST_new(TryExcept)), __init__=interp2app(TryExcept_init), ) @@ -4107,6 +4365,12 @@ w_self.w_body = w_new_value w_self.initialization_state |= 4 +def TryFinally_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + TryFinally_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~4 + def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'finalbody') @@ -4123,6 +4387,12 @@ w_self.w_finalbody = w_new_value w_self.initialization_state |= 8 +def TryFinally_del_finalbody(space, w_self): + # Check if the element exists, raise appropriate exceptions + TryFinally_get_finalbody(space, w_self) + w_self.deldictvalue(space, 'finalbody') + w_self.initialization_state &= ~8 + 
_TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody']) def TryFinally_init(space, w_self, __args__): w_self = space.descr_self_interp_w(TryFinally, w_self) @@ -4144,8 +4414,8 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['body', 'finalbody']), - body=typedef.GetSetProperty(TryFinally_get_body, TryFinally_set_body, cls=TryFinally), - finalbody=typedef.GetSetProperty(TryFinally_get_finalbody, TryFinally_set_finalbody, cls=TryFinally), + body=typedef.GetSetProperty(TryFinally_get_body, TryFinally_set_body, TryFinally_del_body, cls=TryFinally), + finalbody=typedef.GetSetProperty(TryFinally_get_finalbody, TryFinally_set_finalbody, TryFinally_del_finalbody, cls=TryFinally), __new__=interp2app(get_AST_new(TryFinally)), __init__=interp2app(TryFinally_init), ) @@ -4173,6 +4443,12 @@ w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 +def Assert_del_test(space, w_self): + # Check if the element exists, raise appropriate exceptions + Assert_get_test(space, w_self) + w_self.deldictvalue(space, 'test') + w_self.initialization_state &= ~4 + def Assert_get_msg(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'msg') @@ -4196,6 +4472,12 @@ w_self.deldictvalue(space, 'msg') w_self.initialization_state |= 8 +def Assert_del_msg(space, w_self): + # Check if the element exists, raise appropriate exceptions + Assert_get_msg(space, w_self) + w_self.deldictvalue(space, 'msg') + w_self.initialization_state &= ~8 + _Assert_field_unroller = unrolling_iterable(['test', 'msg']) def Assert_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Assert, w_self) @@ -4215,8 +4497,8 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['test', 'msg']), - test=typedef.GetSetProperty(Assert_get_test, Assert_set_test, cls=Assert), - msg=typedef.GetSetProperty(Assert_get_msg, Assert_set_msg, cls=Assert), + test=typedef.GetSetProperty(Assert_get_test, Assert_set_test, Assert_del_test, cls=Assert), + msg=typedef.GetSetProperty(Assert_get_msg, Assert_set_msg, Assert_del_msg, cls=Assert), __new__=interp2app(get_AST_new(Assert)), __init__=interp2app(Assert_init), ) @@ -4237,6 +4519,12 @@ w_self.w_names = w_new_value w_self.initialization_state |= 4 +def Import_del_names(space, w_self): + # Check if the element exists, raise appropriate exceptions + Import_get_names(space, w_self) + w_self.deldictvalue(space, 'names') + w_self.initialization_state &= ~4 + _Import_field_unroller = unrolling_iterable(['names']) def Import_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Import, w_self) @@ -4257,7 +4545,7 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['names']), - names=typedef.GetSetProperty(Import_get_names, Import_set_names, cls=Import), + names=typedef.GetSetProperty(Import_get_names, Import_set_names, Import_del_names, cls=Import), __new__=interp2app(get_AST_new(Import)), __init__=interp2app(Import_init), ) @@ -4286,6 +4574,12 @@ w_self.deldictvalue(space, 'module') w_self.initialization_state |= 4 +def ImportFrom_del_module(space, w_self): + # Check if the element exists, raise appropriate exceptions + ImportFrom_get_module(space, w_self) + w_self.deldictvalue(space, 'module') + w_self.initialization_state &= ~4 + def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'names') @@ -4302,6 +4596,12 @@ w_self.w_names = w_new_value w_self.initialization_state |= 8 +def ImportFrom_del_names(space, w_self): + # Check if the element exists, raise appropriate 
exceptions + ImportFrom_get_names(space, w_self) + w_self.deldictvalue(space, 'names') + w_self.initialization_state &= ~8 + def ImportFrom_get_level(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'level') @@ -4323,6 +4623,12 @@ w_self.deldictvalue(space, 'level') w_self.initialization_state |= 16 +def ImportFrom_del_level(space, w_self): + # Check if the element exists, raise appropriate exceptions + ImportFrom_get_level(space, w_self) + w_self.deldictvalue(space, 'level') + w_self.initialization_state &= ~16 + _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level']) def ImportFrom_init(space, w_self, __args__): w_self = space.descr_self_interp_w(ImportFrom, w_self) @@ -4343,9 +4649,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['module', 'names', 'level']), - module=typedef.GetSetProperty(ImportFrom_get_module, ImportFrom_set_module, cls=ImportFrom), - names=typedef.GetSetProperty(ImportFrom_get_names, ImportFrom_set_names, cls=ImportFrom), - level=typedef.GetSetProperty(ImportFrom_get_level, ImportFrom_set_level, cls=ImportFrom), + module=typedef.GetSetProperty(ImportFrom_get_module, ImportFrom_set_module, ImportFrom_del_module, cls=ImportFrom), + names=typedef.GetSetProperty(ImportFrom_get_names, ImportFrom_set_names, ImportFrom_del_names, cls=ImportFrom), + level=typedef.GetSetProperty(ImportFrom_get_level, ImportFrom_set_level, ImportFrom_del_level, cls=ImportFrom), __new__=interp2app(get_AST_new(ImportFrom)), __init__=interp2app(ImportFrom_init), ) @@ -4373,6 +4679,12 @@ w_self.deldictvalue(space, 'body') w_self.initialization_state |= 4 +def Exec_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + Exec_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~4 + def Exec_get_globals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'globals') @@ -4396,6 +4708,12 @@ w_self.deldictvalue(space, 'globals') w_self.initialization_state |= 8 +def Exec_del_globals(space, w_self): + # Check if the element exists, raise appropriate exceptions + Exec_get_globals(space, w_self) + w_self.deldictvalue(space, 'globals') + w_self.initialization_state &= ~8 + def Exec_get_locals(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'locals') @@ -4419,6 +4737,12 @@ w_self.deldictvalue(space, 'locals') w_self.initialization_state |= 16 +def Exec_del_locals(space, w_self): + # Check if the element exists, raise appropriate exceptions + Exec_get_locals(space, w_self) + w_self.deldictvalue(space, 'locals') + w_self.initialization_state &= ~16 + _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals']) def Exec_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Exec, w_self) @@ -4438,9 +4762,9 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['body', 'globals', 'locals']), - body=typedef.GetSetProperty(Exec_get_body, Exec_set_body, cls=Exec), - globals=typedef.GetSetProperty(Exec_get_globals, Exec_set_globals, cls=Exec), - locals=typedef.GetSetProperty(Exec_get_locals, Exec_set_locals, cls=Exec), + body=typedef.GetSetProperty(Exec_get_body, Exec_set_body, Exec_del_body, cls=Exec), + globals=typedef.GetSetProperty(Exec_get_globals, Exec_set_globals, Exec_del_globals, cls=Exec), + locals=typedef.GetSetProperty(Exec_get_locals, Exec_set_locals, Exec_del_locals, cls=Exec), __new__=interp2app(get_AST_new(Exec)), __init__=interp2app(Exec_init), ) @@ -4461,6 +4785,12 @@ 
w_self.w_names = w_new_value w_self.initialization_state |= 4 +def Global_del_names(space, w_self): + # Check if the element exists, raise appropriate exceptions + Global_get_names(space, w_self) + w_self.deldictvalue(space, 'names') + w_self.initialization_state &= ~4 + _Global_field_unroller = unrolling_iterable(['names']) def Global_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Global, w_self) @@ -4481,7 +4811,7 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['names']), - names=typedef.GetSetProperty(Global_get_names, Global_set_names, cls=Global), + names=typedef.GetSetProperty(Global_get_names, Global_set_names, Global_del_names, cls=Global), __new__=interp2app(get_AST_new(Global)), __init__=interp2app(Global_init), ) @@ -4509,6 +4839,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 +def Expr_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Expr_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + _Expr_field_unroller = unrolling_iterable(['value']) def Expr_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Expr, w_self) @@ -4528,7 +4864,7 @@ stmt.typedef, __module__='_ast', _fields=_FieldsWrapper(['value']), - value=typedef.GetSetProperty(Expr_get_value, Expr_set_value, cls=Expr), + value=typedef.GetSetProperty(Expr_get_value, Expr_set_value, Expr_del_value, cls=Expr), __new__=interp2app(get_AST_new(Expr)), __init__=interp2app(Expr_init), ) @@ -4605,6 +4941,12 @@ w_self.deldictvalue(space, 'lineno') w_self.initialization_state |= 1 +def expr_del_lineno(space, w_self): + # Check if the element exists, raise appropriate exceptions + expr_get_lineno(space, w_self) + w_self.deldictvalue(space, 'lineno') + w_self.initialization_state &= ~1 + def expr_get_col_offset(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'col_offset') @@ -4626,12 +4968,18 @@ w_self.deldictvalue(space, 'col_offset') w_self.initialization_state |= 2 +def expr_del_col_offset(space, w_self): + # Check if the element exists, raise appropriate exceptions + expr_get_col_offset(space, w_self) + w_self.deldictvalue(space, 'col_offset') + w_self.initialization_state &= ~2 + expr.typedef = typedef.TypeDef("expr", AST.typedef, __module__='_ast', _attributes=_FieldsWrapper(['lineno', 'col_offset']), - lineno=typedef.GetSetProperty(expr_get_lineno, expr_set_lineno, cls=expr), - col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, cls=expr), + lineno=typedef.GetSetProperty(expr_get_lineno, expr_set_lineno, expr_del_lineno, cls=expr), + col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, expr_del_col_offset, cls=expr), __new__=interp2app(get_AST_new(expr)), ) @@ -4658,6 +5006,12 @@ w_self.setdictvalue(space, 'op', w_new_value) w_self.initialization_state |= 4 +def BoolOp_del_op(space, w_self): + # Check if the element exists, raise appropriate exceptions + BoolOp_get_op(space, w_self) + w_self.deldictvalue(space, 'op') + w_self.initialization_state &= ~4 + def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'values') @@ -4674,6 +5028,12 @@ w_self.w_values = w_new_value w_self.initialization_state |= 8 +def BoolOp_del_values(space, w_self): + # Check if the element exists, raise appropriate exceptions + BoolOp_get_values(space, w_self) + w_self.deldictvalue(space, 'values') + w_self.initialization_state &= ~8 + _BoolOp_field_unroller = 
unrolling_iterable(['op', 'values']) def BoolOp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(BoolOp, w_self) @@ -4694,8 +5054,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['op', 'values']), - op=typedef.GetSetProperty(BoolOp_get_op, BoolOp_set_op, cls=BoolOp), - values=typedef.GetSetProperty(BoolOp_get_values, BoolOp_set_values, cls=BoolOp), + op=typedef.GetSetProperty(BoolOp_get_op, BoolOp_set_op, BoolOp_del_op, cls=BoolOp), + values=typedef.GetSetProperty(BoolOp_get_values, BoolOp_set_values, BoolOp_del_values, cls=BoolOp), __new__=interp2app(get_AST_new(BoolOp)), __init__=interp2app(BoolOp_init), ) @@ -4723,6 +5083,12 @@ w_self.deldictvalue(space, 'left') w_self.initialization_state |= 4 +def BinOp_del_left(space, w_self): + # Check if the element exists, raise appropriate exceptions + BinOp_get_left(space, w_self) + w_self.deldictvalue(space, 'left') + w_self.initialization_state &= ~4 + def BinOp_get_op(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'op') @@ -4746,6 +5112,12 @@ w_self.setdictvalue(space, 'op', w_new_value) w_self.initialization_state |= 8 +def BinOp_del_op(space, w_self): + # Check if the element exists, raise appropriate exceptions + BinOp_get_op(space, w_self) + w_self.deldictvalue(space, 'op') + w_self.initialization_state &= ~8 + def BinOp_get_right(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'right') @@ -4769,6 +5141,12 @@ w_self.deldictvalue(space, 'right') w_self.initialization_state |= 16 +def BinOp_del_right(space, w_self): + # Check if the element exists, raise appropriate exceptions + BinOp_get_right(space, w_self) + w_self.deldictvalue(space, 'right') + w_self.initialization_state &= ~16 + _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right']) def BinOp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(BinOp, w_self) @@ -4788,9 +5166,9 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['left', 'op', 'right']), - left=typedef.GetSetProperty(BinOp_get_left, BinOp_set_left, cls=BinOp), - op=typedef.GetSetProperty(BinOp_get_op, BinOp_set_op, cls=BinOp), - right=typedef.GetSetProperty(BinOp_get_right, BinOp_set_right, cls=BinOp), + left=typedef.GetSetProperty(BinOp_get_left, BinOp_set_left, BinOp_del_left, cls=BinOp), + op=typedef.GetSetProperty(BinOp_get_op, BinOp_set_op, BinOp_del_op, cls=BinOp), + right=typedef.GetSetProperty(BinOp_get_right, BinOp_set_right, BinOp_del_right, cls=BinOp), __new__=interp2app(get_AST_new(BinOp)), __init__=interp2app(BinOp_init), ) @@ -4818,6 +5196,12 @@ w_self.setdictvalue(space, 'op', w_new_value) w_self.initialization_state |= 4 +def UnaryOp_del_op(space, w_self): + # Check if the element exists, raise appropriate exceptions + UnaryOp_get_op(space, w_self) + w_self.deldictvalue(space, 'op') + w_self.initialization_state &= ~4 + def UnaryOp_get_operand(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'operand') @@ -4841,6 +5225,12 @@ w_self.deldictvalue(space, 'operand') w_self.initialization_state |= 8 +def UnaryOp_del_operand(space, w_self): + # Check if the element exists, raise appropriate exceptions + UnaryOp_get_operand(space, w_self) + w_self.deldictvalue(space, 'operand') + w_self.initialization_state &= ~8 + _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand']) def UnaryOp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(UnaryOp, w_self) @@ -4860,8 +5250,8 @@ expr.typedef, __module__='_ast', 
_fields=_FieldsWrapper(['op', 'operand']), - op=typedef.GetSetProperty(UnaryOp_get_op, UnaryOp_set_op, cls=UnaryOp), - operand=typedef.GetSetProperty(UnaryOp_get_operand, UnaryOp_set_operand, cls=UnaryOp), + op=typedef.GetSetProperty(UnaryOp_get_op, UnaryOp_set_op, UnaryOp_del_op, cls=UnaryOp), + operand=typedef.GetSetProperty(UnaryOp_get_operand, UnaryOp_set_operand, UnaryOp_del_operand, cls=UnaryOp), __new__=interp2app(get_AST_new(UnaryOp)), __init__=interp2app(UnaryOp_init), ) @@ -4887,6 +5277,12 @@ w_self.deldictvalue(space, 'args') w_self.initialization_state |= 4 +def Lambda_del_args(space, w_self): + # Check if the element exists, raise appropriate exceptions + Lambda_get_args(space, w_self) + w_self.deldictvalue(space, 'args') + w_self.initialization_state &= ~4 + def Lambda_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') @@ -4910,6 +5306,12 @@ w_self.deldictvalue(space, 'body') w_self.initialization_state |= 8 +def Lambda_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + Lambda_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~8 + _Lambda_field_unroller = unrolling_iterable(['args', 'body']) def Lambda_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Lambda, w_self) @@ -4929,8 +5331,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['args', 'body']), - args=typedef.GetSetProperty(Lambda_get_args, Lambda_set_args, cls=Lambda), - body=typedef.GetSetProperty(Lambda_get_body, Lambda_set_body, cls=Lambda), + args=typedef.GetSetProperty(Lambda_get_args, Lambda_set_args, Lambda_del_args, cls=Lambda), + body=typedef.GetSetProperty(Lambda_get_body, Lambda_set_body, Lambda_del_body, cls=Lambda), __new__=interp2app(get_AST_new(Lambda)), __init__=interp2app(Lambda_init), ) @@ -4958,6 +5360,12 @@ w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 +def IfExp_del_test(space, w_self): + # Check if the element exists, raise appropriate exceptions + IfExp_get_test(space, w_self) + w_self.deldictvalue(space, 'test') + w_self.initialization_state &= ~4 + def IfExp_get_body(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'body') @@ -4981,6 +5389,12 @@ w_self.deldictvalue(space, 'body') w_self.initialization_state |= 8 +def IfExp_del_body(space, w_self): + # Check if the element exists, raise appropriate exceptions + IfExp_get_body(space, w_self) + w_self.deldictvalue(space, 'body') + w_self.initialization_state &= ~8 + def IfExp_get_orelse(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'orelse') @@ -5004,6 +5418,12 @@ w_self.deldictvalue(space, 'orelse') w_self.initialization_state |= 16 +def IfExp_del_orelse(space, w_self): + # Check if the element exists, raise appropriate exceptions + IfExp_get_orelse(space, w_self) + w_self.deldictvalue(space, 'orelse') + w_self.initialization_state &= ~16 + _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse']) def IfExp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(IfExp, w_self) @@ -5023,9 +5443,9 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['test', 'body', 'orelse']), - test=typedef.GetSetProperty(IfExp_get_test, IfExp_set_test, cls=IfExp), - body=typedef.GetSetProperty(IfExp_get_body, IfExp_set_body, cls=IfExp), - orelse=typedef.GetSetProperty(IfExp_get_orelse, IfExp_set_orelse, cls=IfExp), + test=typedef.GetSetProperty(IfExp_get_test, IfExp_set_test, IfExp_del_test, 
cls=IfExp), + body=typedef.GetSetProperty(IfExp_get_body, IfExp_set_body, IfExp_del_body, cls=IfExp), + orelse=typedef.GetSetProperty(IfExp_get_orelse, IfExp_set_orelse, IfExp_del_orelse, cls=IfExp), __new__=interp2app(get_AST_new(IfExp)), __init__=interp2app(IfExp_init), ) @@ -5046,6 +5466,12 @@ w_self.w_keys = w_new_value w_self.initialization_state |= 4 +def Dict_del_keys(space, w_self): + # Check if the element exists, raise appropriate exceptions + Dict_get_keys(space, w_self) + w_self.deldictvalue(space, 'keys') + w_self.initialization_state &= ~4 + def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'values') @@ -5062,6 +5488,12 @@ w_self.w_values = w_new_value w_self.initialization_state |= 8 +def Dict_del_values(space, w_self): + # Check if the element exists, raise appropriate exceptions + Dict_get_values(space, w_self) + w_self.deldictvalue(space, 'values') + w_self.initialization_state &= ~8 + _Dict_field_unroller = unrolling_iterable(['keys', 'values']) def Dict_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Dict, w_self) @@ -5083,8 +5515,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['keys', 'values']), - keys=typedef.GetSetProperty(Dict_get_keys, Dict_set_keys, cls=Dict), - values=typedef.GetSetProperty(Dict_get_values, Dict_set_values, cls=Dict), + keys=typedef.GetSetProperty(Dict_get_keys, Dict_set_keys, Dict_del_keys, cls=Dict), + values=typedef.GetSetProperty(Dict_get_values, Dict_set_values, Dict_del_values, cls=Dict), __new__=interp2app(get_AST_new(Dict)), __init__=interp2app(Dict_init), ) @@ -5105,6 +5537,12 @@ w_self.w_elts = w_new_value w_self.initialization_state |= 4 +def Set_del_elts(space, w_self): + # Check if the element exists, raise appropriate exceptions + Set_get_elts(space, w_self) + w_self.deldictvalue(space, 'elts') + w_self.initialization_state &= ~4 + _Set_field_unroller = unrolling_iterable(['elts']) def Set_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Set, w_self) @@ -5125,7 +5563,7 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['elts']), - elts=typedef.GetSetProperty(Set_get_elts, Set_set_elts, cls=Set), + elts=typedef.GetSetProperty(Set_get_elts, Set_set_elts, Set_del_elts, cls=Set), __new__=interp2app(get_AST_new(Set)), __init__=interp2app(Set_init), ) @@ -5153,6 +5591,12 @@ w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 +def ListComp_del_elt(space, w_self): + # Check if the element exists, raise appropriate exceptions + ListComp_get_elt(space, w_self) + w_self.deldictvalue(space, 'elt') + w_self.initialization_state &= ~4 + def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'generators') @@ -5169,6 +5613,12 @@ w_self.w_generators = w_new_value w_self.initialization_state |= 8 +def ListComp_del_generators(space, w_self): + # Check if the element exists, raise appropriate exceptions + ListComp_get_generators(space, w_self) + w_self.deldictvalue(space, 'generators') + w_self.initialization_state &= ~8 + _ListComp_field_unroller = unrolling_iterable(['elt', 'generators']) def ListComp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(ListComp, w_self) @@ -5189,8 +5639,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['elt', 'generators']), - elt=typedef.GetSetProperty(ListComp_get_elt, ListComp_set_elt, cls=ListComp), - generators=typedef.GetSetProperty(ListComp_get_generators, ListComp_set_generators, cls=ListComp), + 
elt=typedef.GetSetProperty(ListComp_get_elt, ListComp_set_elt, ListComp_del_elt, cls=ListComp), + generators=typedef.GetSetProperty(ListComp_get_generators, ListComp_set_generators, ListComp_del_generators, cls=ListComp), __new__=interp2app(get_AST_new(ListComp)), __init__=interp2app(ListComp_init), ) @@ -5218,6 +5668,12 @@ w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 +def SetComp_del_elt(space, w_self): + # Check if the element exists, raise appropriate exceptions + SetComp_get_elt(space, w_self) + w_self.deldictvalue(space, 'elt') + w_self.initialization_state &= ~4 + def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'generators') @@ -5234,6 +5690,12 @@ w_self.w_generators = w_new_value w_self.initialization_state |= 8 +def SetComp_del_generators(space, w_self): + # Check if the element exists, raise appropriate exceptions + SetComp_get_generators(space, w_self) + w_self.deldictvalue(space, 'generators') + w_self.initialization_state &= ~8 + _SetComp_field_unroller = unrolling_iterable(['elt', 'generators']) def SetComp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(SetComp, w_self) @@ -5254,8 +5716,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['elt', 'generators']), - elt=typedef.GetSetProperty(SetComp_get_elt, SetComp_set_elt, cls=SetComp), - generators=typedef.GetSetProperty(SetComp_get_generators, SetComp_set_generators, cls=SetComp), + elt=typedef.GetSetProperty(SetComp_get_elt, SetComp_set_elt, SetComp_del_elt, cls=SetComp), + generators=typedef.GetSetProperty(SetComp_get_generators, SetComp_set_generators, SetComp_del_generators, cls=SetComp), __new__=interp2app(get_AST_new(SetComp)), __init__=interp2app(SetComp_init), ) @@ -5283,6 +5745,12 @@ w_self.deldictvalue(space, 'key') w_self.initialization_state |= 4 +def DictComp_del_key(space, w_self): + # Check if the element exists, raise appropriate exceptions + DictComp_get_key(space, w_self) + w_self.deldictvalue(space, 'key') + w_self.initialization_state &= ~4 + def DictComp_get_value(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'value') @@ -5306,6 +5774,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 8 +def DictComp_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + DictComp_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~8 + def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'generators') @@ -5322,6 +5796,12 @@ w_self.w_generators = w_new_value w_self.initialization_state |= 16 +def DictComp_del_generators(space, w_self): + # Check if the element exists, raise appropriate exceptions + DictComp_get_generators(space, w_self) + w_self.deldictvalue(space, 'generators') + w_self.initialization_state &= ~16 + _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators']) def DictComp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(DictComp, w_self) @@ -5342,9 +5822,9 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['key', 'value', 'generators']), - key=typedef.GetSetProperty(DictComp_get_key, DictComp_set_key, cls=DictComp), - value=typedef.GetSetProperty(DictComp_get_value, DictComp_set_value, cls=DictComp), - generators=typedef.GetSetProperty(DictComp_get_generators, DictComp_set_generators, cls=DictComp), + key=typedef.GetSetProperty(DictComp_get_key, 
DictComp_set_key, DictComp_del_key, cls=DictComp), + value=typedef.GetSetProperty(DictComp_get_value, DictComp_set_value, DictComp_del_value, cls=DictComp), + generators=typedef.GetSetProperty(DictComp_get_generators, DictComp_set_generators, DictComp_del_generators, cls=DictComp), __new__=interp2app(get_AST_new(DictComp)), __init__=interp2app(DictComp_init), ) @@ -5372,6 +5852,12 @@ w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 +def GeneratorExp_del_elt(space, w_self): + # Check if the element exists, raise appropriate exceptions + GeneratorExp_get_elt(space, w_self) + w_self.deldictvalue(space, 'elt') + w_self.initialization_state &= ~4 + def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'generators') @@ -5388,6 +5874,12 @@ w_self.w_generators = w_new_value w_self.initialization_state |= 8 +def GeneratorExp_del_generators(space, w_self): + # Check if the element exists, raise appropriate exceptions + GeneratorExp_get_generators(space, w_self) + w_self.deldictvalue(space, 'generators') + w_self.initialization_state &= ~8 + _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators']) def GeneratorExp_init(space, w_self, __args__): w_self = space.descr_self_interp_w(GeneratorExp, w_self) @@ -5408,8 +5900,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['elt', 'generators']), - elt=typedef.GetSetProperty(GeneratorExp_get_elt, GeneratorExp_set_elt, cls=GeneratorExp), - generators=typedef.GetSetProperty(GeneratorExp_get_generators, GeneratorExp_set_generators, cls=GeneratorExp), + elt=typedef.GetSetProperty(GeneratorExp_get_elt, GeneratorExp_set_elt, GeneratorExp_del_elt, cls=GeneratorExp), + generators=typedef.GetSetProperty(GeneratorExp_get_generators, GeneratorExp_set_generators, GeneratorExp_del_generators, cls=GeneratorExp), __new__=interp2app(get_AST_new(GeneratorExp)), __init__=interp2app(GeneratorExp_init), ) @@ -5437,6 +5929,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 +def Yield_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Yield_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + _Yield_field_unroller = unrolling_iterable(['value']) def Yield_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Yield, w_self) @@ -5456,7 +5954,7 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['value']), - value=typedef.GetSetProperty(Yield_get_value, Yield_set_value, cls=Yield), + value=typedef.GetSetProperty(Yield_get_value, Yield_set_value, Yield_del_value, cls=Yield), __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) @@ -5484,6 +5982,12 @@ w_self.deldictvalue(space, 'left') w_self.initialization_state |= 4 +def Compare_del_left(space, w_self): + # Check if the element exists, raise appropriate exceptions + Compare_get_left(space, w_self) + w_self.deldictvalue(space, 'left') + w_self.initialization_state &= ~4 + def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'ops') @@ -5500,6 +6004,12 @@ w_self.w_ops = w_new_value w_self.initialization_state |= 8 +def Compare_del_ops(space, w_self): + # Check if the element exists, raise appropriate exceptions + Compare_get_ops(space, w_self) + w_self.deldictvalue(space, 'ops') + w_self.initialization_state &= ~8 + def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, 
w_self, 'comparators') @@ -5516,6 +6026,12 @@ w_self.w_comparators = w_new_value w_self.initialization_state |= 16 +def Compare_del_comparators(space, w_self): + # Check if the element exists, raise appropriate exceptions + Compare_get_comparators(space, w_self) + w_self.deldictvalue(space, 'comparators') + w_self.initialization_state &= ~16 + _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators']) def Compare_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Compare, w_self) @@ -5537,9 +6053,9 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['left', 'ops', 'comparators']), - left=typedef.GetSetProperty(Compare_get_left, Compare_set_left, cls=Compare), - ops=typedef.GetSetProperty(Compare_get_ops, Compare_set_ops, cls=Compare), - comparators=typedef.GetSetProperty(Compare_get_comparators, Compare_set_comparators, cls=Compare), + left=typedef.GetSetProperty(Compare_get_left, Compare_set_left, Compare_del_left, cls=Compare), + ops=typedef.GetSetProperty(Compare_get_ops, Compare_set_ops, Compare_del_ops, cls=Compare), + comparators=typedef.GetSetProperty(Compare_get_comparators, Compare_set_comparators, Compare_del_comparators, cls=Compare), __new__=interp2app(get_AST_new(Compare)), __init__=interp2app(Compare_init), ) @@ -5567,6 +6083,12 @@ w_self.deldictvalue(space, 'func') w_self.initialization_state |= 4 +def Call_del_func(space, w_self): + # Check if the element exists, raise appropriate exceptions + Call_get_func(space, w_self) + w_self.deldictvalue(space, 'func') + w_self.initialization_state &= ~4 + def Call_get_args(space, w_self): if not w_self.initialization_state & 8: raise_attriberr(space, w_self, 'args') @@ -5583,6 +6105,12 @@ w_self.w_args = w_new_value w_self.initialization_state |= 8 +def Call_del_args(space, w_self): + # Check if the element exists, raise appropriate exceptions + Call_get_args(space, w_self) + w_self.deldictvalue(space, 'args') + w_self.initialization_state &= ~8 + def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: raise_attriberr(space, w_self, 'keywords') @@ -5599,6 +6127,12 @@ w_self.w_keywords = w_new_value w_self.initialization_state |= 16 +def Call_del_keywords(space, w_self): + # Check if the element exists, raise appropriate exceptions + Call_get_keywords(space, w_self) + w_self.deldictvalue(space, 'keywords') + w_self.initialization_state &= ~16 + def Call_get_starargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'starargs') @@ -5622,6 +6156,12 @@ w_self.deldictvalue(space, 'starargs') w_self.initialization_state |= 32 +def Call_del_starargs(space, w_self): + # Check if the element exists, raise appropriate exceptions + Call_get_starargs(space, w_self) + w_self.deldictvalue(space, 'starargs') + w_self.initialization_state &= ~32 + def Call_get_kwargs(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'kwargs') @@ -5645,6 +6185,12 @@ w_self.deldictvalue(space, 'kwargs') w_self.initialization_state |= 64 +def Call_del_kwargs(space, w_self): + # Check if the element exists, raise appropriate exceptions + Call_get_kwargs(space, w_self) + w_self.deldictvalue(space, 'kwargs') + w_self.initialization_state &= ~64 + _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs']) def Call_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Call, w_self) @@ -5666,11 +6212,11 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['func', 'args', 'keywords', 'starargs', 
'kwargs']), - func=typedef.GetSetProperty(Call_get_func, Call_set_func, cls=Call), - args=typedef.GetSetProperty(Call_get_args, Call_set_args, cls=Call), - keywords=typedef.GetSetProperty(Call_get_keywords, Call_set_keywords, cls=Call), - starargs=typedef.GetSetProperty(Call_get_starargs, Call_set_starargs, cls=Call), - kwargs=typedef.GetSetProperty(Call_get_kwargs, Call_set_kwargs, cls=Call), + func=typedef.GetSetProperty(Call_get_func, Call_set_func, Call_del_func, cls=Call), + args=typedef.GetSetProperty(Call_get_args, Call_set_args, Call_del_args, cls=Call), + keywords=typedef.GetSetProperty(Call_get_keywords, Call_set_keywords, Call_del_keywords, cls=Call), + starargs=typedef.GetSetProperty(Call_get_starargs, Call_set_starargs, Call_del_starargs, cls=Call), + kwargs=typedef.GetSetProperty(Call_get_kwargs, Call_set_kwargs, Call_del_kwargs, cls=Call), __new__=interp2app(get_AST_new(Call)), __init__=interp2app(Call_init), ) @@ -5698,6 +6244,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 +def Repr_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Repr_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + _Repr_field_unroller = unrolling_iterable(['value']) def Repr_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Repr, w_self) @@ -5717,7 +6269,7 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['value']), - value=typedef.GetSetProperty(Repr_get_value, Repr_set_value, cls=Repr), + value=typedef.GetSetProperty(Repr_get_value, Repr_set_value, Repr_del_value, cls=Repr), __new__=interp2app(get_AST_new(Repr)), __init__=interp2app(Repr_init), ) @@ -5743,6 +6295,12 @@ w_self.deldictvalue(space, 'n') w_self.initialization_state |= 4 +def Num_del_n(space, w_self): + # Check if the element exists, raise appropriate exceptions + Num_get_n(space, w_self) + w_self.deldictvalue(space, 'n') + w_self.initialization_state &= ~4 + _Num_field_unroller = unrolling_iterable(['n']) def Num_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Num, w_self) @@ -5762,7 +6320,7 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['n']), - n=typedef.GetSetProperty(Num_get_n, Num_set_n, cls=Num), + n=typedef.GetSetProperty(Num_get_n, Num_set_n, Num_del_n, cls=Num), __new__=interp2app(get_AST_new(Num)), __init__=interp2app(Num_init), ) @@ -5788,6 +6346,12 @@ w_self.deldictvalue(space, 's') w_self.initialization_state |= 4 +def Str_del_s(space, w_self): + # Check if the element exists, raise appropriate exceptions + Str_get_s(space, w_self) + w_self.deldictvalue(space, 's') + w_self.initialization_state &= ~4 + _Str_field_unroller = unrolling_iterable(['s']) def Str_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Str, w_self) @@ -5807,7 +6371,7 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['s']), - s=typedef.GetSetProperty(Str_get_s, Str_set_s, cls=Str), + s=typedef.GetSetProperty(Str_get_s, Str_set_s, Str_del_s, cls=Str), __new__=interp2app(get_AST_new(Str)), __init__=interp2app(Str_init), ) @@ -5835,6 +6399,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 +def Attribute_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Attribute_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + def Attribute_get_attr(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'attr') @@ -5856,6 +6426,12 @@ 
w_self.deldictvalue(space, 'attr') w_self.initialization_state |= 8 +def Attribute_del_attr(space, w_self): + # Check if the element exists, raise appropriate exceptions + Attribute_get_attr(space, w_self) + w_self.deldictvalue(space, 'attr') + w_self.initialization_state &= ~8 + def Attribute_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') @@ -5879,6 +6455,12 @@ w_self.setdictvalue(space, 'ctx', w_new_value) w_self.initialization_state |= 16 +def Attribute_del_ctx(space, w_self): + # Check if the element exists, raise appropriate exceptions + Attribute_get_ctx(space, w_self) + w_self.deldictvalue(space, 'ctx') + w_self.initialization_state &= ~16 + _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx']) def Attribute_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Attribute, w_self) @@ -5898,9 +6480,9 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['value', 'attr', 'ctx']), - value=typedef.GetSetProperty(Attribute_get_value, Attribute_set_value, cls=Attribute), - attr=typedef.GetSetProperty(Attribute_get_attr, Attribute_set_attr, cls=Attribute), - ctx=typedef.GetSetProperty(Attribute_get_ctx, Attribute_set_ctx, cls=Attribute), + value=typedef.GetSetProperty(Attribute_get_value, Attribute_set_value, Attribute_del_value, cls=Attribute), + attr=typedef.GetSetProperty(Attribute_get_attr, Attribute_set_attr, Attribute_del_attr, cls=Attribute), + ctx=typedef.GetSetProperty(Attribute_get_ctx, Attribute_set_ctx, Attribute_del_ctx, cls=Attribute), __new__=interp2app(get_AST_new(Attribute)), __init__=interp2app(Attribute_init), ) @@ -5928,6 +6510,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 +def Subscript_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Subscript_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + def Subscript_get_slice(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'slice') @@ -5951,6 +6539,12 @@ w_self.deldictvalue(space, 'slice') w_self.initialization_state |= 8 +def Subscript_del_slice(space, w_self): + # Check if the element exists, raise appropriate exceptions + Subscript_get_slice(space, w_self) + w_self.deldictvalue(space, 'slice') + w_self.initialization_state &= ~8 + def Subscript_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') @@ -5974,6 +6568,12 @@ w_self.setdictvalue(space, 'ctx', w_new_value) w_self.initialization_state |= 16 +def Subscript_del_ctx(space, w_self): + # Check if the element exists, raise appropriate exceptions + Subscript_get_ctx(space, w_self) + w_self.deldictvalue(space, 'ctx') + w_self.initialization_state &= ~16 + _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx']) def Subscript_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Subscript, w_self) @@ -5993,9 +6593,9 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['value', 'slice', 'ctx']), - value=typedef.GetSetProperty(Subscript_get_value, Subscript_set_value, cls=Subscript), - slice=typedef.GetSetProperty(Subscript_get_slice, Subscript_set_slice, cls=Subscript), - ctx=typedef.GetSetProperty(Subscript_get_ctx, Subscript_set_ctx, cls=Subscript), + value=typedef.GetSetProperty(Subscript_get_value, Subscript_set_value, Subscript_del_value, cls=Subscript), + slice=typedef.GetSetProperty(Subscript_get_slice, Subscript_set_slice, Subscript_del_slice, 
cls=Subscript), + ctx=typedef.GetSetProperty(Subscript_get_ctx, Subscript_set_ctx, Subscript_del_ctx, cls=Subscript), __new__=interp2app(get_AST_new(Subscript)), __init__=interp2app(Subscript_init), ) @@ -6021,6 +6621,12 @@ w_self.deldictvalue(space, 'id') w_self.initialization_state |= 4 +def Name_del_id(space, w_self): + # Check if the element exists, raise appropriate exceptions + Name_get_id(space, w_self) + w_self.deldictvalue(space, 'id') + w_self.initialization_state &= ~4 + def Name_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') @@ -6044,6 +6650,12 @@ w_self.setdictvalue(space, 'ctx', w_new_value) w_self.initialization_state |= 8 +def Name_del_ctx(space, w_self): + # Check if the element exists, raise appropriate exceptions + Name_get_ctx(space, w_self) + w_self.deldictvalue(space, 'ctx') + w_self.initialization_state &= ~8 + _Name_field_unroller = unrolling_iterable(['id', 'ctx']) def Name_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Name, w_self) @@ -6063,8 +6675,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['id', 'ctx']), - id=typedef.GetSetProperty(Name_get_id, Name_set_id, cls=Name), - ctx=typedef.GetSetProperty(Name_get_ctx, Name_set_ctx, cls=Name), + id=typedef.GetSetProperty(Name_get_id, Name_set_id, Name_del_id, cls=Name), + ctx=typedef.GetSetProperty(Name_get_ctx, Name_set_ctx, Name_del_ctx, cls=Name), __new__=interp2app(get_AST_new(Name)), __init__=interp2app(Name_init), ) @@ -6085,6 +6697,12 @@ w_self.w_elts = w_new_value w_self.initialization_state |= 4 +def List_del_elts(space, w_self): + # Check if the element exists, raise appropriate exceptions + List_get_elts(space, w_self) + w_self.deldictvalue(space, 'elts') + w_self.initialization_state &= ~4 + def List_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') @@ -6108,6 +6726,12 @@ w_self.setdictvalue(space, 'ctx', w_new_value) w_self.initialization_state |= 8 +def List_del_ctx(space, w_self): + # Check if the element exists, raise appropriate exceptions + List_get_ctx(space, w_self) + w_self.deldictvalue(space, 'ctx') + w_self.initialization_state &= ~8 + _List_field_unroller = unrolling_iterable(['elts', 'ctx']) def List_init(space, w_self, __args__): w_self = space.descr_self_interp_w(List, w_self) @@ -6128,8 +6752,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['elts', 'ctx']), - elts=typedef.GetSetProperty(List_get_elts, List_set_elts, cls=List), - ctx=typedef.GetSetProperty(List_get_ctx, List_set_ctx, cls=List), + elts=typedef.GetSetProperty(List_get_elts, List_set_elts, List_del_elts, cls=List), + ctx=typedef.GetSetProperty(List_get_ctx, List_set_ctx, List_del_ctx, cls=List), __new__=interp2app(get_AST_new(List)), __init__=interp2app(List_init), ) @@ -6150,6 +6774,12 @@ w_self.w_elts = w_new_value w_self.initialization_state |= 4 +def Tuple_del_elts(space, w_self): + # Check if the element exists, raise appropriate exceptions + Tuple_get_elts(space, w_self) + w_self.deldictvalue(space, 'elts') + w_self.initialization_state &= ~4 + def Tuple_get_ctx(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'ctx') @@ -6173,6 +6803,12 @@ w_self.setdictvalue(space, 'ctx', w_new_value) w_self.initialization_state |= 8 +def Tuple_del_ctx(space, w_self): + # Check if the element exists, raise appropriate exceptions + Tuple_get_ctx(space, w_self) + w_self.deldictvalue(space, 'ctx') + w_self.initialization_state &= ~8 + _Tuple_field_unroller = 
unrolling_iterable(['elts', 'ctx']) def Tuple_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Tuple, w_self) @@ -6193,8 +6829,8 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['elts', 'ctx']), - elts=typedef.GetSetProperty(Tuple_get_elts, Tuple_set_elts, cls=Tuple), - ctx=typedef.GetSetProperty(Tuple_get_ctx, Tuple_set_ctx, cls=Tuple), + elts=typedef.GetSetProperty(Tuple_get_elts, Tuple_set_elts, Tuple_del_elts, cls=Tuple), + ctx=typedef.GetSetProperty(Tuple_get_ctx, Tuple_set_ctx, Tuple_del_ctx, cls=Tuple), __new__=interp2app(get_AST_new(Tuple)), __init__=interp2app(Tuple_init), ) @@ -6220,6 +6856,12 @@ w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 +def Const_del_value(space, w_self): + # Check if the element exists, raise appropriate exceptions + Const_get_value(space, w_self) + w_self.deldictvalue(space, 'value') + w_self.initialization_state &= ~4 + _Const_field_unroller = unrolling_iterable(['value']) def Const_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Const, w_self) @@ -6239,7 +6881,7 @@ expr.typedef, __module__='_ast', _fields=_FieldsWrapper(['value']), - value=typedef.GetSetProperty(Const_get_value, Const_set_value, cls=Const), + value=typedef.GetSetProperty(Const_get_value, Const_set_value, Const_del_value, cls=Const), __new__=interp2app(get_AST_new(Const)), __init__=interp2app(Const_init), ) @@ -6340,6 +6982,12 @@ w_self.deldictvalue(space, 'lower') w_self.initialization_state |= 1 +def Slice_del_lower(space, w_self): + # Check if the element exists, raise appropriate exceptions + Slice_get_lower(space, w_self) + w_self.deldictvalue(space, 'lower') + w_self.initialization_state &= ~1 + def Slice_get_upper(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'upper') @@ -6363,6 +7011,12 @@ w_self.deldictvalue(space, 'upper') w_self.initialization_state |= 2 +def Slice_del_upper(space, w_self): + # Check if the element exists, raise appropriate exceptions + Slice_get_upper(space, w_self) + w_self.deldictvalue(space, 'upper') + w_self.initialization_state &= ~2 + def Slice_get_step(space, w_self): if w_self.w_dict is not None: w_obj = w_self.getdictvalue(space, 'step') @@ -6386,6 +7040,12 @@ w_self.deldictvalue(space, 'step') w_self.initialization_state |= 4 +def Slice_del_step(space, w_self): + # Check if the element exists, raise appropriate exceptions + Slice_get_step(space, w_self) + w_self.deldictvalue(space, 'step') + w_self.initialization_state &= ~4 + _Slice_field_unroller = unrolling_iterable(['lower', 'upper', 'step']) def Slice_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Slice, w_self) @@ -6405,9 +7065,9 @@ slice.typedef, __module__='_ast', _fields=_FieldsWrapper(['lower', 'upper', 'step']), - lower=typedef.GetSetProperty(Slice_get_lower, Slice_set_lower, cls=Slice), - upper=typedef.GetSetProperty(Slice_get_upper, Slice_set_upper, cls=Slice), - step=typedef.GetSetProperty(Slice_get_step, Slice_set_step, cls=Slice), + lower=typedef.GetSetProperty(Slice_get_lower, Slice_set_lower, Slice_del_lower, cls=Slice), + upper=typedef.GetSetProperty(Slice_get_upper, Slice_set_upper, Slice_del_upper, cls=Slice), + step=typedef.GetSetProperty(Slice_get_step, Slice_set_step, Slice_del_step, cls=Slice), __new__=interp2app(get_AST_new(Slice)), __init__=interp2app(Slice_init), ) @@ -6428,6 +7088,12 @@ w_self.w_dims = w_new_value w_self.initialization_state |= 1 From noreply at buildbot.pypy.org Fri Mar 7 21:35:34 2014 From: noreply at buildbot.pypy.org (arigo) 
Date: Fri, 7 Mar 2014 21:35:34 +0100 (CET) Subject: [pypy-commit] pypy default: Add a passing test Message-ID: <20140307203534.8F1061C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69792:ee319eb91664 Date: 2014-03-07 21:33 +0100 http://bitbucket.org/pypy/pypy/changeset/ee319eb91664/ Log: Add a passing test diff --git a/rpython/jit/metainterp/test/test_longlong.py b/rpython/jit/metainterp/test/test_longlong.py --- a/rpython/jit/metainterp/test/test_longlong.py +++ b/rpython/jit/metainterp/test/test_longlong.py @@ -138,6 +138,15 @@ res = self.interp_operations(f, [1000000000]) assert res == 12350000000000000000.0 + def test_float_to_longlong(self): + from rpython.rtyper.lltypesystem import lltype, rffi + def f(x): + compare(r_longlong(x), 0x12, 0x34567800) + compare(rffi.cast(lltype.SignedLongLong, x), 0x12, 0x34567800) + return 1 + res = self.interp_operations(f, [0x12345678 * 256.0]) + assert res == 1 + def test_unsigned_compare_ops(self): def f(n1, n2): # n == 30002000000000 From noreply at buildbot.pypy.org Fri Mar 7 21:35:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Mar 2014 21:35:35 +0100 (CET) Subject: [pypy-commit] pypy default: Support (reasonably) converting a user-supplied "double" directly to a Message-ID: <20140307203535.D28201C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69793:3f834e4349e4 Date: 2014-03-07 21:34 +0100 http://bitbucket.org/pypy/pypy/changeset/3f834e4349e4/ Log: Support (reasonably) converting a user-supplied "double" directly to a time_t, which may be 64-bit even on 32-bit platforms --- notably on Windows. Should fix issue1697. diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -358,14 +358,16 @@ seconds = pytime.time() else: seconds = space.float_w(w_seconds) - try: - seconds = ovfcheck_float_to_int(seconds) - t = rffi.r_time_t(seconds) - if rffi.cast(lltype.Signed, t) != seconds: - raise OverflowError - except OverflowError: + # + t = rffi.cast(rffi.TIME_T, seconds) + # + # Logic from CPython: How much info did we lose? We assume that + # time_t is an integral type. If we lost a second or more, the + # input doesn't fit in a time_t; call it an error. 
+ diff = seconds - rffi.cast(lltype.Float, t) + if diff <= -1.0 or diff >= 1.0: raise OperationError(space.w_ValueError, - space.wrap("time argument too large")) + space.wrap("timestamp out of range for platform time_t")) return t def _tm_to_tuple(space, t): diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -59,6 +59,8 @@ assert 0 <= (t1 - t0) < 1.2 t = rctime.time() assert rctime.gmtime(t) == rctime.gmtime(t) + raises(ValueError, rctime.gmtime, 2**64) + raises(ValueError, rctime.gmtime, -2**64) def test_localtime(self): import time as rctime From noreply at buildbot.pypy.org Sat Mar 8 09:27:27 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Sat, 8 Mar 2014 09:27:27 +0100 (CET) Subject: [pypy-commit] pyrepl msabramo/ord_char_python3: pyrepl/unix_eventqueue.py: Fix Python 3 ord(char) issue Message-ID: <20140308082727.2BBCC1C0906@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: msabramo/ord_char_python3 Changeset: r238:0cd9ad59040f Date: 2014-02-28 23:09 +0000 http://bitbucket.org/pypy/pyrepl/changeset/0cd9ad59040f/ Log: pyrepl/unix_eventqueue.py: Fix Python 3 ord(char) issue In Python 3, byte strings are composed of ints, so no need to call ord() on the elements. Solves a test failure: testing/test_unix_reader.py:11: in test_simple > q.push(c) pyrepl/unix_eventqueue.py:103: in push > self.buf.append(ord(char)) E TypeError: ord() expected string of length 1, but int found diff --git a/pyrepl/unix_eventqueue.py b/pyrepl/unix_eventqueue.py --- a/pyrepl/unix_eventqueue.py +++ b/pyrepl/unix_eventqueue.py @@ -100,7 +100,8 @@ self.events.append(event) def push(self, char): - self.buf.append(ord(char)) + ord_char = char if isinstance(char, int) else ord(char) + self.buf.append(ord_char) if char in self.k: if self.k is self.ck: #sanity check, buffer is empty when a special key comes From noreply at buildbot.pypy.org Sat Mar 8 09:27:28 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Sat, 8 Mar 2014 09:27:28 +0100 (CET) Subject: [pypy-commit] pyrepl default: Merged in msabramo/pyrepl/msabramo/ord_char_python3 (pull request #7) Message-ID: <20140308082728.35FE41C0906@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: Changeset: r239:bcea63638a4f Date: 2014-03-08 09:27 +0100 http://bitbucket.org/pypy/pyrepl/changeset/bcea63638a4f/ Log: Merged in msabramo/pyrepl/msabramo/ord_char_python3 (pull request #7) pyrepl/unix_eventqueue.py: Fix Python 3 ord(char) issue diff --git a/pyrepl/unix_eventqueue.py b/pyrepl/unix_eventqueue.py --- a/pyrepl/unix_eventqueue.py +++ b/pyrepl/unix_eventqueue.py @@ -100,7 +100,8 @@ self.events.append(event) def push(self, char): - self.buf.append(ord(char)) + ord_char = char if isinstance(char, int) else ord(char) + self.buf.append(ord_char) if char in self.k: if self.k is self.ck: #sanity check, buffer is empty when a special key comes From noreply at buildbot.pypy.org Sat Mar 8 09:27:36 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Sat, 8 Mar 2014 09:27:36 +0100 (CET) Subject: [pypy-commit] pyrepl msabramo/hgignore: .hgignore: Add \.pyc Message-ID: <20140308082736.19B121C0906@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: msabramo/hgignore Changeset: r241:12e8c4240629 Date: 2014-02-28 07:38 -0800 http://bitbucket.org/pypy/pyrepl/changeset/12e8c4240629/ Log: .hgignore: Add \.pyc diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -3,3 
+3,4 @@
 .cache/
 \.tox/
 .*\.egg-info
+\.pyc

From noreply at buildbot.pypy.org Sat Mar 8 09:27:37 2014
From: noreply at buildbot.pypy.org (Marc Abramowitz)
Date: Sat, 8 Mar 2014 09:27:37 +0100 (CET)
Subject: [pypy-commit] pyrepl msabramo/hgignore: .hgignore: Add \.swp
Message-ID: <20140308082737.253CE1C0906@cobra.cs.uni-duesseldorf.de>

Author: Marc Abramowitz
Branch: msabramo/hgignore
Changeset: r242:f1ff288f7364
Date: 2014-02-28 10:20 -0800
http://bitbucket.org/pypy/pyrepl/changeset/f1ff288f7364/

Log: .hgignore: Add \.swp

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,3 +4,4 @@
 \.tox/
 .*\.egg-info
 \.pyc
+\.swp

From noreply at buildbot.pypy.org Sat Mar 8 09:27:38 2014
From: noreply at buildbot.pypy.org (RonnyPfannschmidt)
Date: Sat, 8 Mar 2014 09:27:38 +0100 (CET)
Subject: [pypy-commit] pyrepl default: Merged in msabramo/pyrepl/msabramo/hgignore (pull request #5)
Message-ID: <20140308082738.2B9C61C0906@cobra.cs.uni-duesseldorf.de>

Author: Ronny Pfannschmidt
Branch:
Changeset: r243:a1e175f1817b
Date: 2014-03-08 09:27 +0100
http://bitbucket.org/pypy/pyrepl/changeset/a1e175f1817b/

Log: Merged in msabramo/pyrepl/msabramo/hgignore (pull request #5)
     .hgignore: Add \.pyc

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -3,3 +3,5 @@
 .cache/
 \.tox/
 .*\.egg-info
+\.pyc
+\.swp

From noreply at buildbot.pypy.org Sat Mar 8 09:28:21 2014
From: noreply at buildbot.pypy.org (Marc Abramowitz)
Date: Sat, 8 Mar 2014 09:28:21 +0100 (CET)
Subject: [pypy-commit] pyrepl fix_setup_test_basic_python3: testing/test_functional.py: Use sys.stdout.buffer for Python 3 instead of sys.stdout
Message-ID: <20140308082821.0B4C71C0907@cobra.cs.uni-duesseldorf.de>

Author: Marc Abramowitz
Branch: fix_setup_test_basic_python3
Changeset: r245:678384ba626c
Date: 2014-02-28 23:45 +0000
http://bitbucket.org/pypy/pyrepl/changeset/678384ba626c/

Log: testing/test_functional.py: Use sys.stdout.buffer for Python 3 instead
     of sys.stdout because Python 3's sys.stdout takes unicode; not bytes.
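As a rough illustration of the behaviour this change works around (a sketch, not taken from the patch itself; the variable names are invented for the example): pexpect writes the child's raw output to the logfile as bytes, and on Python 3 the text-mode sys.stdout only accepts str, so the bytes have to go to the underlying binary buffer, sys.stdout.buffer.

    import sys

    data = b"raw output captured from the child process\n"   # pexpect logs bytes
    if sys.version_info >= (3,):
        # In Python 3, sys.stdout is a text stream; passing it bytes raises
        # TypeError, so write to the underlying binary buffer instead.
        sys.stdout.buffer.write(data)
    else:
        # In Python 2, str is already a byte string, so plain stdout is fine.
        sys.stdout.write(data)

That is why the fixture in the diff below assigns child.logfile = sys.stdout.buffer on Python 3 and keeps plain sys.stdout on Python 2.
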
diff --git a/testing/test_functional.py b/testing/test_functional.py --- a/testing/test_functional.py +++ b/testing/test_functional.py @@ -13,7 +13,10 @@ except SyntaxError: pytest.skip('pexpect wont work on py3k') child = pexpect.spawn(sys.executable, ['-S'], timeout=10) - child.logfile = sys.stdout + if sys.version_info >= (3, ): + child.logfile = sys.stdout.buffer + else: + child.logfile = sys.stdout child.sendline('from pyrepl.python_reader import main') child.sendline('main()') return child From noreply at buildbot.pypy.org Sat Mar 8 09:28:22 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Sat, 8 Mar 2014 09:28:22 +0100 (CET) Subject: [pypy-commit] pyrepl default: Merged in msabramo/pyrepl/fix_setup_test_basic_python3 (pull request #9) Message-ID: <20140308082822.3267F1C0907@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: Changeset: r246:7720a0e701dc Date: 2014-03-08 09:28 +0100 http://bitbucket.org/pypy/pyrepl/changeset/7720a0e701dc/ Log: Merged in msabramo/pyrepl/fix_setup_test_basic_python3 (pull request #9) testing/test_functional.py: Use sys.stdout.buffer for Python 3 instead of sys.stdout diff --git a/testing/test_functional.py b/testing/test_functional.py --- a/testing/test_functional.py +++ b/testing/test_functional.py @@ -13,7 +13,10 @@ except SyntaxError: pytest.skip('pexpect wont work on py3k') child = pexpect.spawn(sys.executable, ['-S'], timeout=10) - child.logfile = sys.stdout + if sys.version_info >= (3, ): + child.logfile = sys.stdout.buffer + else: + child.logfile = sys.stdout child.sendline('from pyrepl.python_reader import main') child.sendline('main()') return child From noreply at buildbot.pypy.org Sat Mar 8 09:28:45 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Sat, 8 Mar 2014 09:28:45 +0100 (CET) Subject: [pypy-commit] pyrepl msabramo/update_tox_ini: tox.ini: envlist = py26, py27, pypy, py33 Message-ID: <20140308082845.8E8551C0907@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: msabramo/update_tox_ini Changeset: r248:341baf9ebb69 Date: 2014-02-28 17:30 +0000 http://bitbucket.org/pypy/pyrepl/changeset/341baf9ebb69/ Log: tox.ini: envlist = py26, py27, pypy, py33 diff --git a/tox.ini b/tox.ini --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist= py27, py32 +envlist = py26, py27, pypy, py33 [pytest] codechecks = pep8 pyflakes From noreply at buildbot.pypy.org Sat Mar 8 09:28:46 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Sat, 8 Mar 2014 09:28:46 +0100 (CET) Subject: [pypy-commit] pyrepl msabramo/update_tox_ini: Merge msabramo/hgignore into msabramo/_dev Message-ID: <20140308082846.9F26D1C0907@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: msabramo/update_tox_ini Changeset: r249:6688125c619c Date: 2014-02-28 18:22 +0000 http://bitbucket.org/pypy/pyrepl/changeset/6688125c619c/ Log: Merge msabramo/hgignore into msabramo/_dev diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -3,3 +3,5 @@ .cache/ \.tox/ .*\.egg-info +\.pyc +\.swp From noreply at buildbot.pypy.org Sat Mar 8 09:28:47 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Sat, 8 Mar 2014 09:28:47 +0100 (CET) Subject: [pypy-commit] pyrepl default: Merged in msabramo/pyrepl/msabramo/update_tox_ini (pull request #6) Message-ID: <20140308082847.B3C0F1C0907@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: Changeset: r250:d173694f875e Date: 2014-03-08 09:28 +0100 http://bitbucket.org/pypy/pyrepl/changeset/d173694f875e/ Log: Merged in 
msabramo/pyrepl/msabramo/update_tox_ini (pull request #6) tox.ini: envlist = py26, py27, pypy, py33 diff --git a/tox.ini b/tox.ini --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist= py27, py32 +envlist = py26, py27, pypy, py33 [pytest] codechecks = pep8 pyflakes From noreply at buildbot.pypy.org Sat Mar 8 12:01:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 12:01:55 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/d0f79129cbb7 Message-ID: <20140308110155.1730F1C0907@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69794:82f2ddadfc97 Date: 2014-03-08 10:33 +0100 http://bitbucket.org/pypy/pypy/changeset/82f2ddadfc97/ Log: import stmgc/d0f79129cbb7 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -202ea90b3c60 +d0f79129cbb7 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -474,6 +474,10 @@ stm_thread_local_t *tl = pseg->pub.running_thread; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; + + /* reset these lists to NULL too on abort */ + LIST_FREE(pseg->objects_pointing_to_nursery); + LIST_FREE(pseg->large_overflow_objects); } static void abort_with_mutex(void) diff --git a/rpython/translator/stm/src_stm/stm/list.h b/rpython/translator/stm/src_stm/stm/list.h --- a/rpython/translator/stm/src_stm/stm/list.h +++ b/rpython/translator/stm/src_stm/stm/list.h @@ -1,5 +1,6 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ #include +#include /************************************************************/ diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -49,8 +49,9 @@ tree_contains(STM_PSEGMENT->young_outside_nursery, (uintptr_t)obj)); } -bool _stm_in_nursery(object_t *obj) +long stm_can_move(object_t *obj) { + /* 'long' return value to avoid using 'bool' in the public interface */ return _is_in_nursery(obj); } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -10,7 +10,6 @@ #include #include -#include #include #include #include @@ -73,10 +72,10 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up); char *_stm_real_address(object_t *o); #ifdef STM_TESTS +#include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); uint8_t _stm_get_page_flag(uintptr_t index); -bool _stm_in_nursery(object_t *obj); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); @@ -109,8 +108,8 @@ #else #define OPT_ASSERT(cond) assert(cond) #endif -#define LIKELY(x) __builtin_expect(x, true) -#define UNLIKELY(x) __builtin_expect(x, false) +#define LIKELY(x) __builtin_expect(x, 1) +#define UNLIKELY(x) __builtin_expect(x, 0) #define IMPLY(a, b) (!(a) || (b)) @@ -270,6 +269,10 @@ long stm_id(object_t *obj); void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); +/* Returns 1 if the object can still move (it's in the nursery), or 0 + otherwise. 
After a minor collection no object can move any more. */ +long stm_can_move(object_t *); + /* ==================== END ==================== */ From noreply at buildbot.pypy.org Sat Mar 8 12:01:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 12:01:56 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140308110156.6EA021C0907@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69795:73a1beb10939 Date: 2014-03-08 10:42 +0100 http://bitbucket.org/pypy/pypy/changeset/73a1beb10939/ Log: in-progress diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -14,21 +14,9 @@ WORD = LONG_BIT // 8 NULL = llmemory.NULL -first_gcflag = 1 << (LONG_BIT//2) - -def get_hdr_tid(addr): - return llmemory.cast_adr_to_ptr(addr + StmGC.H_TID, rffi.SIGNEDP) - -def get_hdr_revision(addr): - return llmemory.cast_adr_to_ptr(addr + StmGC.H_REVISION, rffi.SIGNEDP) - -def get_hdr_original(addr): - return llmemory.cast_adr_to_ptr(addr + StmGC.H_ORIGINAL, rffi.SIGNEDP) - - class StmGC(MovingGCBase): _alloc_flavor_ = "raw" inline_simple_malloc = True @@ -51,34 +39,6 @@ TRANSLATION_PARAMS = { } - # keep in sync with stmgc.h & et.h: - GCFLAG_OLD = first_gcflag << 0 - GCFLAG_VISITED = first_gcflag << 1 - GCFLAG_PUBLIC = first_gcflag << 2 - GCFLAG_PREBUILT_ORIGINAL = first_gcflag << 3 - GCFLAG_PUBLIC_TO_PRIVATE = first_gcflag << 4 - GCFLAG_WRITE_BARRIER = first_gcflag << 5 # stmgc.h - GCFLAG_MOVED = first_gcflag << 6 - GCFLAG_BACKUP_COPY = first_gcflag << 7 # debug - GCFLAG_STUB = first_gcflag << 8 # debug - GCFLAG_PRIVATE_FROM_PROTECTED = first_gcflag << 9 - GCFLAG_HAS_ID = first_gcflag << 10 - GCFLAG_IMMUTABLE = first_gcflag << 11 - GCFLAG_SMALLSTUB = first_gcflag << 12 - GCFLAG_MARKED = first_gcflag << 13 - - PREBUILT_FLAGS = first_gcflag * ((1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<13)) - PREBUILT_REVISION = r_uint(1) - - FX_MASK = 65535 - - # keep in sync with nursery.h: - - # maximum size of object in nursery (is actually dependent on - # nursery size, but this should work) - GC_NURSERY_SECTION = 135168 - - def get_type_id(self, obj): return llop.stm_get_tid(llgroup.HALFWORD, obj) @@ -87,21 +47,8 @@ # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # - llop.stm_initialize(lltype.Void) + llop.stm_setup(lltype.Void) - - def get_original_copy(self, obj): - addr = llmemory.cast_ptr_to_adr(obj) - if bool(get_hdr_tid(addr)[0] & StmGC.GCFLAG_PREBUILT_ORIGINAL): - return obj - # - orig = get_hdr_original(addr)[0] - if orig == 0: - return obj - # - return llmemory.cast_adr_to_ptr(llmemory.cast_int_to_adr(orig), - llmemory.GCREF) - def init_gc_object_immortal(self, addr, typeid16, flags=0): assert flags == 0 assert isinstance(typeid16, llgroup.GroupMemberOffset) @@ -142,12 +89,8 @@ def can_move(self, obj): """Means the reference will stay valid, except if not seen by the GC, then it can get collected.""" - tid = get_hdr_tid(obj)[0] - if bool(tid & StmGC.GCFLAG_OLD): - return False # XXX wrong so far. 
We should add a flag to the - # object that means "don't ever kill this copy" - return True - + return llop.stm_can_move(lltype.Bool, obj) + @classmethod def JIT_max_size_of_young_obj(cls): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -406,16 +406,12 @@ 'cast_opaque_ptr': LLOp(sideeffects=False), # __________ Software Transactional Memory __________ - # (Note that these operations could also be decomposed into individual - # direct_calls and maybe several casts, but it looks less heavy-weight - # to keep them as operations until the genc stage) - # NOTE: use canmallocgc for all operations that can contain a collection. # that includes all that do 'BecomeInevitable' or otherwise contain # possible GC safe-points! (also sync with stmframework.py) # (some ops like stm_commit_transaction don't need it because there # must be no gc-var access afterwards anyway) - 'stm_initialize': LLOp(canmallocgc=True), + 'stm_setup': LLOp(), 'stm_finalize': LLOp(canmallocgc=True), 'stm_barrier': LLOp(sideeffects=False), 'stm_allocate': LLOp(sideeffects=False, canmallocgc=True), @@ -463,6 +459,8 @@ 'stm_ignored_start': LLOp(canrun=True), 'stm_ignored_stop': LLOp(canrun=True), + 'stm_can_move': LLOp(), + # __________ address operations __________ 'boehm_malloc': LLOp(), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -625,6 +625,8 @@ OP_JIT_STM_TRANSACTION_BREAK_POINT = _OP_STM OP_JIT_STM_SHOULD_BREAK_TRANSACTION = _OP_STM + OP_STM_CAN_MOVE = _OP_STM + def OP_STM_IGNORED_START(self, op): return '/* stm_ignored_start */' diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -187,7 +187,7 @@ def stm_get_tid(funcgen, op): arg0 = funcgen.expr(op.args[0]) result = funcgen.expr(op.result) - return '%s = stm_get_tid((gcptr)%s);' % (result, arg0) + return '%s = ((struct rpyobj_s*)%s)->tid;' % (result, arg0) def stm_hash(funcgen, op): arg0 = funcgen.expr(op.args[0]) @@ -265,6 +265,11 @@ def stm_major_collect(funcgen, op): return 'stm_major_collect();' +def stm_can_move(funcop, op): + arg0 = funcgen.expr(op.args[0]) + result = funcgen.expr(op.result) + return '%s = stm_can_move(%s);' % (result, arg0) + def op_stm(funcgen, op): func = globals()[op.opname] diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -370,6 +370,8 @@ def insert_stm_barrier(stmtransformer, graph): + return #XXX + """This function uses the following characters for 'categories': * 'A': any general pointer From noreply at buildbot.pypy.org Sat Mar 8 12:01:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 12:01:57 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140308110157.AD86A1C0907@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69796:b1e070ba9ae9 Date: 2014-03-08 12:01 +0100 http://bitbucket.org/pypy/pypy/changeset/b1e070ba9ae9/ Log: in-progress diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -27,9 +27,6 @@ #gcflag_extra = GCFLAG_EXTRA HDR = stmgcintf.GCPTR.TO - H_TID = 0 - 
H_REVISION = WORD - H_ORIGINAL = WORD * 2 typeid_is_in_field = None VISIT_FPTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void)) @@ -40,7 +37,7 @@ } def get_type_id(self, obj): - return llop.stm_get_tid(llgroup.HALFWORD, obj) + return llop.stm_addr_get_tid(llgroup.HALFWORD, obj) def setup(self): # Hack: MovingGCBase.setup() sets up stuff related to id(), which @@ -48,6 +45,7 @@ GCBase.setup(self) # llop.stm_setup(lltype.Void) + llop.stm_register_thread_local(lltype.Void) def init_gc_object_immortal(self, addr, typeid16, flags=0): assert flags == 0 @@ -69,19 +67,21 @@ #ll_assert(not needs_finalizer, 'XXX needs_finalizer') #ll_assert(not is_finalizer_light, 'XXX is_finalizer_light') ll_assert(not contains_weakptr, 'contains_weakptr: use malloc_weakref') - # XXX call optimized versions, e.g. if size < GC_NURSERY_SECTION - return llop.stm_allocate(llmemory.GCREF, size, typeid16) + if size < 16: + size = 16 # minimum size (test usually constant-folded) + return llop.stm_allocate_tid(llmemory.GCREF, size, typeid16) def malloc_varsize_clear(self, typeid16, length, size, itemsize, offset_to_length): - # XXX be careful about overflows, and call optimized versions + # XXX be careful here about overflows totalsize = size + itemsize * length totalsize = llarena.round_up_for_allocation(totalsize) - obj = llop.stm_allocate(llmemory.Address, totalsize, typeid16) - (obj + offset_to_length).signed[0] = length - return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + result = llop.stm_allocate_tid(llmemory.GCREF, totalsize, typeid16) + llop.stm_set_into_obj(lltype.Void, result, offset_to_length, length) + return result def malloc_weakref(self, typeid16, size, obj): + raise NotImplementedError # XXX return llop.stm_weakref_allocate(llmemory.GCREF, size, typeid16, obj) @@ -102,18 +102,15 @@ def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" - if gen > 0: - llop.stm_major_collect(lltype.Void) - else: - llop.stm_minor_collect(lltype.Void) + llop.stm_collect(lltype.Void, gen) def writebarrier_before_copy(self, source_addr, dest_addr, source_start, dest_start, length): ll_assert(False, 'XXX') return False - + def id(self, gcobj): return llop.stm_id(lltype.Signed, gcobj) def identityhash(self, gcobj): - return llop.stm_hash(lltype.Signed, gcobj) + return llop.stm_identityhash(lltype.Signed, gcobj) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -412,54 +412,62 @@ # (some ops like stm_commit_transaction don't need it because there # must be no gc-var access afterwards anyway) 'stm_setup': LLOp(), - 'stm_finalize': LLOp(canmallocgc=True), - 'stm_barrier': LLOp(sideeffects=False), - 'stm_allocate': LLOp(sideeffects=False, canmallocgc=True), - 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), + 'stm_register_thread_local': LLOp(), + 'stm_unregister_thread_local': LLOp(), + 'stm_can_move': LLOp(), + 'stm_allocate_tid': LLOp(sideeffects=False, canmallocgc=True), + 'stm_get_from_obj': LLOp(sideeffects=False), + 'stm_get_from_obj_const': LLOp(canfold=True), + 'stm_set_into_obj': LLOp(), + 'stm_collect': LLOp(canmallocgc=True), + 'stm_id': LLOp(sideeffects=False), + 'stm_identityhash': LLOp(canfold=True), + 'stm_addr_get_tid': LLOp(canfold=True), 'stm_become_inevitable': LLOp(canmallocgc=True), - 'stm_stop_all_other_threads': LLOp(canmallocgc=True), - 'stm_partial_commit_and_resume_other_threads': 
LLOp(canmallocgc=True), - 'stm_minor_collect': LLOp(canmallocgc=True), - 'stm_major_collect': LLOp(canmallocgc=True), - 'stm_get_tid': LLOp(canfold=True), - 'stm_ptr_eq': LLOp(canfold=True), - 'stm_id': LLOp(sideeffects=False), - 'stm_hash': LLOp(sideeffects=False), 'stm_push_root': LLOp(), 'stm_pop_root_into': LLOp(), - 'stm_commit_transaction': LLOp(canmallocgc=True), - 'stm_begin_inevitable_transaction': LLOp(canmallocgc=True), - 'stm_should_break_transaction': LLOp(sideeffects=False), - 'stm_set_transaction_length': LLOp(canmallocgc=True), - 'stm_change_atomic': LLOp(), - 'stm_get_atomic': LLOp(sideeffects=False), - 'stm_perform_transaction':LLOp(canmallocgc=True), - 'stm_enter_callback_call':LLOp(canmallocgc=True), - 'stm_leave_callback_call':LLOp(canmallocgc=True), - 'stm_abort_and_retry': LLOp(canmallocgc=True), - 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), +## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), +## 'stm_become_inevitable': LLOp(canmallocgc=True), +## 'stm_stop_all_other_threads': LLOp(canmallocgc=True), +## 'stm_partial_commit_and_resume_other_threads': LLOp(canmallocgc=True), +## 'stm_minor_collect': LLOp(canmallocgc=True), +## 'stm_major_collect': LLOp(canmallocgc=True), +## 'stm_get_tid': LLOp(canfold=True), +## 'stm_ptr_eq': LLOp(canfold=True), +## 'stm_id': LLOp(sideeffects=False), +## 'stm_hash': LLOp(sideeffects=False), +## 'stm_commit_transaction': LLOp(canmallocgc=True), +## 'stm_begin_inevitable_transaction': LLOp(canmallocgc=True), +## 'stm_should_break_transaction': LLOp(sideeffects=False), +## 'stm_set_transaction_length': LLOp(canmallocgc=True), +## 'stm_change_atomic': LLOp(), +## 'stm_get_atomic': LLOp(sideeffects=False), +## 'stm_perform_transaction':LLOp(canmallocgc=True), +## 'stm_enter_callback_call':LLOp(canmallocgc=True), +## 'stm_leave_callback_call':LLOp(canmallocgc=True), +## 'stm_abort_and_retry': LLOp(canmallocgc=True), + +## 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), - 'stm_threadlocalref_get': LLOp(sideeffects=False), - 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, - # see threadlocalref.py - 'stm_threadlocal_get': LLOp(sideeffects=False), - 'stm_threadlocal_set': LLOp(), +## 'stm_threadlocalref_get': LLOp(sideeffects=False), +## 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, +## # see threadlocalref.py +## 'stm_threadlocal_get': LLOp(sideeffects=False), +## 'stm_threadlocal_set': LLOp(), - 'stm_abort_info_push': LLOp(), - 'stm_abort_info_pop': LLOp(), - 'stm_inspect_abort_info': LLOp(sideeffects=False, canmallocgc=True), +## 'stm_abort_info_push': LLOp(), +## 'stm_abort_info_pop': LLOp(), +## 'stm_inspect_abort_info': LLOp(sideeffects=False, canmallocgc=True), - 'stm_get_adr_of_private_rev_num':LLOp(), - 'stm_get_adr_of_read_barrier_cache':LLOp(), - 'stm_get_adr_of_nursery_current': LLOp(), - 'stm_get_adr_of_nursery_nextlimit': LLOp(), - 'stm_get_adr_of_active': LLOp(), +## 'stm_get_adr_of_private_rev_num':LLOp(), +## 'stm_get_adr_of_read_barrier_cache':LLOp(), +## 'stm_get_adr_of_nursery_current': LLOp(), +## 'stm_get_adr_of_nursery_nextlimit': LLOp(), +## 'stm_get_adr_of_active': LLOp(), - 'stm_ignored_start': LLOp(canrun=True), - 'stm_ignored_stop': LLOp(canrun=True), - - 'stm_can_move': LLOp(), +## 'stm_ignored_start': LLOp(canrun=True), +## 'stm_ignored_stop': LLOp(canrun=True), # __________ address operations __________ diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py 
--- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -296,6 +296,9 @@ label += '_back' yield 'goto %s;' % label + def _is_stm(self): + return getattr(self.db.translator, 'stm_transformation_applied', False) + def gen_op(self, op): macro = 'OP_%s' % op.opname.upper() line = None @@ -303,6 +306,13 @@ meth = getattr(self.gcpolicy, macro, None) if meth: line = meth(self, op) + elif op.opname.startswith('stm_'): + if not self._is_stm(): + raise AssertionError("STM transformation not applied. " + "You need '--stm'") + from rpython.translator.stm import funcgen + func = getattr(funcgen, op.opname) + line = func(self, op) else: meth = getattr(self, macro, None) if meth: @@ -571,69 +581,6 @@ assert isinstance(ARRAY, Array) return '%s = %s.length;'%(self.expr(op.result), expr) - - def _is_stm(self): - return getattr(self.db.translator, 'stm_transformation_applied', False) - - def _OP_STM(self, op): - if not hasattr(self, 'op_stm'): - if not self._is_stm(): - raise AssertionError("STM transformation not applied. " - "You need '--stm'") - from rpython.translator.stm.funcgen import op_stm - self.__class__.op_stm = op_stm - return self.op_stm(op) - OP_STM_INITIALIZE = _OP_STM - OP_STM_FINALIZE = _OP_STM - OP_STM_BECOME_INEVITABLE = _OP_STM - OP_STM_STOP_ALL_OTHER_THREADS = _OP_STM - OP_STM_PARTIAL_COMMIT_AND_RESUME_OTHER_THREADS = _OP_STM - OP_STM_BARRIER = _OP_STM - OP_STM_PTR_EQ = _OP_STM - OP_STM_PUSH_ROOT = _OP_STM - OP_STM_POP_ROOT_INTO = _OP_STM - OP_STM_GET_ADR_OF_NURSERY_CURRENT = _OP_STM - OP_STM_GET_ADR_OF_NURSERY_NEXTLIMIT = _OP_STM - OP_STM_GET_ADR_OF_ACTIVE = _OP_STM - OP_STM_GET_ROOT_STACK_TOP = _OP_STM - OP_STM_GET_ADR_OF_PRIVATE_REV_NUM = _OP_STM - OP_STM_GET_ADR_OF_READ_BARRIER_CACHE= _OP_STM - OP_STM_ALLOCATE = _OP_STM - OP_STM_WEAKREF_ALLOCATE = _OP_STM - OP_STM_GET_TID = _OP_STM - OP_STM_HASH = _OP_STM - OP_STM_ID = _OP_STM - OP_STM_COMMIT_TRANSACTION = _OP_STM - OP_STM_BEGIN_INEVITABLE_TRANSACTION = _OP_STM - OP_STM_SHOULD_BREAK_TRANSACTION = _OP_STM - OP_STM_SET_TRANSACTION_LENGTH = _OP_STM - OP_STM_CHANGE_ATOMIC = _OP_STM - OP_STM_GET_ATOMIC = _OP_STM - OP_STM_THREADLOCAL_GET = _OP_STM - OP_STM_THREADLOCAL_SET = _OP_STM - OP_STM_PERFORM_TRANSACTION = _OP_STM - OP_STM_ENTER_CALLBACK_CALL = _OP_STM - OP_STM_LEAVE_CALLBACK_CALL = _OP_STM - OP_STM_ABORT_AND_RETRY = _OP_STM - OP_STM_ABORT_INFO_PUSH = _OP_STM - OP_STM_ABORT_INFO_POP = _OP_STM - OP_STM_INSPECT_ABORT_INFO = _OP_STM - OP_STM_MAJOR_COLLECT = _OP_STM - OP_STM_MINOR_COLLECT = _OP_STM - OP_STM_CLEAR_EXCEPTION_DATA_ON_ABORT= _OP_STM - OP_STM_ALLOCATE_NONMOVABLE_INT_ADR = _OP_STM - OP_JIT_STM_TRANSACTION_BREAK_POINT = _OP_STM - OP_JIT_STM_SHOULD_BREAK_TRANSACTION = _OP_STM - - OP_STM_CAN_MOVE = _OP_STM - - def OP_STM_IGNORED_START(self, op): - return '/* stm_ignored_start */' - - def OP_STM_IGNORED_STOP(self, op): - return '/* stm_ignored_stop */' - - def OP_PTR_NONZERO(self, op): return '%s = (%s != NULL);' % (self.expr(op.result), self.expr(op.args[0])) diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -237,7 +237,7 @@ Bool: 'bool_t @', Void: 'void @', Address: 'void* @', - GCREF: 'void* @', + GCREF: 'rpy_gc_char *@', } def define_c_primitive(ll_type, c_name, suffix=''): diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -12,6 +12,13 @@ #ifdef RPY_STM +typedef stm_char rpy_gc_char; 
+#else +typedef char rpy_gc_char; +#endif + + +#ifdef RPY_STM void _pypy_stm_free(void *); #define _OP_RAW_MALLOCED(r) stm_call_on_abort(r, _pypy_stm_free) #define _OP_RAW_STM_UNREGISTER(r) stm_call_on_abort(r, NULL) diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -2,6 +2,7 @@ from rpython.translator.c.support import c_string_constant, cdecl from rpython.translator.c.node import Node, ContainerNode from rpython.translator.c.primitive import name_small_integer +from rpython.rtyper.lltypesystem import llmemory class StmHeaderOpaqueDefNode(Node): @@ -42,72 +43,61 @@ # self.obj.prebuilt_hash -def stm_initialize(funcgen, op): - return '''stm_initialize(); - stm_clear_on_abort(&pypy_g_ExcData.ed_exc_type, - sizeof(struct pypy_object0 *)); - ''' +def stm_setup(funcgen, op): + return 'stm_setup();' -def jit_stm_transaction_break_point(funcgen, op): - return '/* jit_stm_transaction_break_point */' +def stm_register_thread_local(funcgen, op): + return 'stm_register_thread_local(&stm_thread_local);' -def jit_stm_should_break_transaction(funcgen, op): +def stm_unregister_thread_local(funcgen, op): + return 'stm_unregister_thread_local(&stm_thread_local);' + +def stm_can_move(funcop, op): + arg0 = funcgen.expr(op.args[0]) result = funcgen.expr(op.result) - return '%s = 0; /* jit_stm_should_break_transaction */' % (result, ) - -def stm_finalize(funcgen, op): - return 'stm_finalize();' + return '%s = stm_can_move(%s);' % (result, arg0) -def stm_barrier(funcgen, op): - category_change = op.args[0].value - # XXX: how to unify the stm_barrier llop generation in - # writebarrier.py and threadlocalref.py? - if isinstance(category_change, str): - frm, middle, to = category_change - else: # rstr - frm, middle, to = (category_change.chars[0], - category_change.chars[1], - category_change.chars[2]) - assert middle == '2' - assert frm < to - if to == 'W': - if frm >= 'V': - funcname = 'stm_repeat_write_barrier' - else: - funcname = 'stm_write_barrier' - elif to == 'V': - funcname = 'stm_write_barrier_noptr' - elif to == 'R': - if frm >= 'Q': - funcname = 'stm_repeat_read_barrier' - else: - funcname = 'stm_read_barrier' - elif to == 'I': - funcname = 'stm_immut_read_barrier' - else: - raise AssertionError(category_change) - assert op.args[1].concretetype == op.result.concretetype - arg = funcgen.expr(op.args[1]) +def stm_allocate_tid(funcgen, op): + arg_size = funcgen.expr(op.args[0]) + arg_type_id = funcgen.expr(op.args[1]) + result = funcgen.expr(op.result) + return ('%s = stm_allocate(%s); ' % (result, arg_size) + + '((rpyobj_t *)%s)->type_id = %s;' % (result, arg_type_id)) + +def stm_get_from_obj(funcgen, op): + assert op.args[0].concretetype == llmemory.GCREF + arg_obj = funcgen.expr(op.args[0]) + arg_ofs = funcgen.expr(op.args[1]) + result = funcgen.expr(op.result) + resulttype = cdecl(funcgen.lltypename(op.result), '') + return '%s = *(%s *)(%s + %s);' % (result, resulttype, arg_obj, arg_ofs) + +stm_get_from_obj_const = stm_get_from_obj + +def stm_set_into_obj(funcgen, op): + assert op.args[0].concretetype == llmemory.GCREF + arg_obj = funcgen.expr(op.args[0]) + arg_ofs = funcgen.expr(op.args[1]) + arg_val = funcgen.expr(op.args[2]) + valtype = cdecl(funcgen.lltypename(op.args[2]), '') + return '*(%s *)(%s + %s) = %s;' % (valtype, arg_obj, arg_ofs, arg_val) + +def stm_collect(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + return 'stm_collect(%s);' % (arg0,) + +def stm_id(funcgen, op): + 
arg0 = funcgen.expr(op.args[0]) + return 'stm_id((object_t *)%s);' % (arg0,) + +def stm_identityhash(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + return 'stm_identityhash((object_t *)%s);' % (arg0,) + +def stm_addr_get_tid(funcgen, op): + arg0 = funcgen.expr(op.args[0]) result = funcgen.expr(op.result) - return '%s = (%s)%s((gcptr)%s);' % ( - result, cdecl(funcgen.lltypename(op.result), ''), - funcname, arg) - -def stm_ptr_eq(funcgen, op): - args = [funcgen.expr(v) for v in op.args] - result = funcgen.expr(op.result) - # check for prebuilt arguments - for i, j in [(0, 1), (1, 0)]: - if isinstance(op.args[j], Constant): - if op.args[j].value: # non-NULL - return ('%s = stm_pointer_equal_prebuilt((gcptr)%s, (gcptr)%s);' - % (result, args[i], args[j])) - else: - # this case might be unreachable, but better safe than sorry - return '%s = (%s == NULL);' % (result, args[i]) - # - return '%s = stm_pointer_equal((gcptr)%s, (gcptr)%s);' % ( - result, args[0], args[1]) + return '%s = ((struct rpyobj_s *)%s)->type_id;' % (result, arg0) def stm_become_inevitable(funcgen, op): try: @@ -117,160 +107,218 @@ string_literal = c_string_constant(info) return 'stm_become_inevitable(%s);' % (string_literal,) -def stm_stop_all_other_threads(funcgen, op): - return 'stm_stop_all_other_threads();' - -def stm_partial_commit_and_resume_other_threads(funcgen, op): - return 'stm_partial_commit_and_resume_other_threads();' - def stm_push_root(funcgen, op): arg0 = funcgen.expr(op.args[0]) - return 'stm_push_root((gcptr)%s);' % (arg0,) + return 'STM_PUSH_ROOT(stm_thread_local, %s);' % (arg0,) def stm_pop_root_into(funcgen, op): arg0 = funcgen.expr(op.args[0]) if isinstance(op.args[0], Constant): - return '/* %s = */ stm_pop_root();' % (arg0,) - return '%s = (%s)stm_pop_root();' % ( - arg0, cdecl(funcgen.lltypename(op.args[0]), '')) + return '/* %s = */ STM_POP_ROOT_RET(stm_thread_local);' % (arg0,) + return 'STM_POP_ROOT(stm_thread_local, %s);' % (arg0,) -def stm_get_adr_of_nursery_current(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (%s)&stm_nursery_current;' % ( - result, cdecl(funcgen.lltypename(op.result), '')) -def stm_get_adr_of_nursery_nextlimit(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (%s)&stm_nursery_nextlimit;' % ( - result, cdecl(funcgen.lltypename(op.result), '')) +##def stm_initialize(funcgen, op): +## return '''stm_initialize(); +## stm_clear_on_abort(&pypy_g_ExcData, sizeof(pypy_g_ExcData)); +## ''' -def stm_get_adr_of_active(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (%s)&stm_active;' % ( - result, cdecl(funcgen.lltypename(op.result), '')) +##def jit_stm_transaction_break_point(funcgen, op): +## return '/* jit_stm_transaction_break_point */' + +##def jit_stm_should_break_transaction(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = 0; /* jit_stm_should_break_transaction */' % (result, ) -def stm_get_root_stack_top(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (%s)&stm_shadowstack;' % ( - result, cdecl(funcgen.lltypename(op.result), '')) +##def stm_finalize(funcgen, op): +## return 'stm_finalize();' -def stm_get_adr_of_private_rev_num(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (%s)&stm_private_rev_num;' % ( - result, cdecl(funcgen.lltypename(op.result), '')) +##def stm_barrier(funcgen, op): +## category_change = op.args[0].value +## # XXX: how to unify the stm_barrier llop generation in +## # writebarrier.py and threadlocalref.py? 
+## if isinstance(category_change, str): +## frm, middle, to = category_change +## else: # rstr +## frm, middle, to = (category_change.chars[0], +## category_change.chars[1], +## category_change.chars[2]) +## assert middle == '2' +## assert frm < to +## if to == 'W': +## if frm >= 'V': +## funcname = 'stm_repeat_write_barrier' +## else: +## funcname = 'stm_write_barrier' +## elif to == 'V': +## funcname = 'stm_write_barrier_noptr' +## elif to == 'R': +## if frm >= 'Q': +## funcname = 'stm_repeat_read_barrier' +## else: +## funcname = 'stm_read_barrier' +## elif to == 'I': +## funcname = 'stm_immut_read_barrier' +## else: +## raise AssertionError(category_change) +## assert op.args[1].concretetype == op.result.concretetype +## arg = funcgen.expr(op.args[1]) +## result = funcgen.expr(op.result) +## return '%s = (%s)%s((gcptr)%s);' % ( +## result, cdecl(funcgen.lltypename(op.result), ''), +## funcname, arg) -def stm_get_adr_of_read_barrier_cache(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (%s)&stm_read_barrier_cache;' % ( - result, cdecl(funcgen.lltypename(op.result), '')) +##def stm_ptr_eq(funcgen, op): +## args = [funcgen.expr(v) for v in op.args] +## result = funcgen.expr(op.result) +## # check for prebuilt arguments +## for i, j in [(0, 1), (1, 0)]: +## if isinstance(op.args[j], Constant): +## if op.args[j].value: # non-NULL +## return ('%s = stm_pointer_equal_prebuilt((gcptr)%s, (gcptr)%s);' +## % (result, args[i], args[j])) +## else: +## # this case might be unreachable, but better safe than sorry +## return '%s = (%s == NULL);' % (result, args[i]) +## # +## return '%s = stm_pointer_equal((gcptr)%s, (gcptr)%s);' % ( +## result, args[0], args[1]) + +##def stm_become_inevitable(funcgen, op): +## try: +## info = op.args[0].value +## except IndexError: +## info = "rstm.become_inevitable" # cannot insert it in 'llop' +## string_literal = c_string_constant(info) +## return 'stm_become_inevitable(%s);' % (string_literal,) + +##def stm_stop_all_other_threads(funcgen, op): +## return 'stm_stop_all_other_threads();' + +##def stm_partial_commit_and_resume_other_threads(funcgen, op): +## return 'stm_partial_commit_and_resume_other_threads();' + +##def stm_get_adr_of_nursery_current(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = (%s)&stm_nursery_current;' % ( +## result, cdecl(funcgen.lltypename(op.result), '')) + +##def stm_get_adr_of_nursery_nextlimit(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = (%s)&stm_nursery_nextlimit;' % ( +## result, cdecl(funcgen.lltypename(op.result), '')) + +##def stm_get_adr_of_active(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = (%s)&stm_active;' % ( +## result, cdecl(funcgen.lltypename(op.result), '')) +##def stm_get_root_stack_top(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = (%s)&stm_shadowstack;' % ( +## result, cdecl(funcgen.lltypename(op.result), '')) + +##def stm_get_adr_of_private_rev_num(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = (%s)&stm_private_rev_num;' % ( +## result, cdecl(funcgen.lltypename(op.result), '')) + +##def stm_get_adr_of_read_barrier_cache(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = (%s)&stm_read_barrier_cache;' % ( +## result, cdecl(funcgen.lltypename(op.result), '')) -def stm_weakref_allocate(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) - arg2 = funcgen.expr(op.args[2]) - result = funcgen.expr(op.result) - return '%s = stm_weakref_allocate(%s, %s, %s);' 
% (result, arg0, - arg1, arg2) + +##def stm_weakref_allocate(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## arg1 = funcgen.expr(op.args[1]) +## arg2 = funcgen.expr(op.args[2]) +## result = funcgen.expr(op.result) +## return '%s = stm_weakref_allocate(%s, %s, %s);' % (result, arg0, +## arg1, arg2) -def stm_allocate_nonmovable_int_adr(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - result = funcgen.expr(op.result) - return '%s = stm_allocate_public_integer_address(%s);' % (result, arg0) - -def stm_allocate(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) - result = funcgen.expr(op.result) - return '%s = stm_allocate(%s, %s);' % (result, arg0, arg1) +##def stm_allocate_nonmovable_int_adr(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## result = funcgen.expr(op.result) +## return '%s = stm_allocate_public_integer_address(%s);' % (result, arg0) -def stm_get_tid(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - result = funcgen.expr(op.result) - return '%s = ((struct rpyobj_s*)%s)->tid;' % (result, arg0) +##def stm_get_tid(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## result = funcgen.expr(op.result) +## return '%s = ((struct rpyobj_s*)%s)->tid;' % (result, arg0) -def stm_hash(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - result = funcgen.expr(op.result) - return '%s = stm_hash((gcptr)%s);' % (result, arg0) +##def stm_hash(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## result = funcgen.expr(op.result) +## return '%s = stm_hash((gcptr)%s);' % (result, arg0) -def stm_id(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - result = funcgen.expr(op.result) - return '%s = stm_id((gcptr)%s);' % (result, arg0) +##def stm_id(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## result = funcgen.expr(op.result) +## return '%s = stm_id((gcptr)%s);' % (result, arg0) -def stm_commit_transaction(funcgen, op): - return '{ int e = errno; stm_commit_transaction(); errno = e; }' +##def stm_commit_transaction(funcgen, op): +## return '{ int e = errno; stm_commit_transaction(); errno = e; }' -def stm_begin_inevitable_transaction(funcgen, op): - return '{ int e = errno; stm_begin_inevitable_transaction(); errno = e; }' +##def stm_begin_inevitable_transaction(funcgen, op): +## return '{ int e = errno; stm_begin_inevitable_transaction(); errno = e; }' -def stm_should_break_transaction(funcgen, op): - result = funcgen.expr(op.result) - return '%s = stm_should_break_transaction();' % (result,) +##def stm_should_break_transaction(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = stm_should_break_transaction();' % (result,) -def stm_set_transaction_length(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - return 'stm_set_transaction_length(%s);' % (arg0,) +##def stm_set_transaction_length(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## return 'stm_set_transaction_length(%s);' % (arg0,) -def stm_change_atomic(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - return 'stm_atomic(%s);' % (arg0,) +##def stm_change_atomic(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## return 'stm_atomic(%s);' % (arg0,) -def stm_get_atomic(funcgen, op): - result = funcgen.expr(op.result) - return '%s = stm_atomic(0);' % (result,) +##def stm_get_atomic(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = stm_atomic(0);' % (result,) -def stm_threadlocal_get(funcgen, op): - result = funcgen.expr(op.result) - return '%s = (%s)stm_thread_local_obj;' % ( - result, cdecl(funcgen.lltypename(op.result), '')) +##def stm_threadlocal_get(funcgen, 
op): +## result = funcgen.expr(op.result) +## return '%s = (%s)stm_thread_local_obj;' % ( +## result, cdecl(funcgen.lltypename(op.result), '')) -def stm_threadlocal_set(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - return 'stm_thread_local_obj = (gcptr)%s;' % (arg0,) +##def stm_threadlocal_set(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## return 'stm_thread_local_obj = (gcptr)%s;' % (arg0,) -def stm_perform_transaction(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) - return 'stm_perform_transaction((gcptr)%s, %s);' % (arg0, arg1) +##def stm_perform_transaction(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## arg1 = funcgen.expr(op.args[1]) +## return 'stm_perform_transaction((gcptr)%s, %s);' % (arg0, arg1) -def stm_enter_callback_call(funcgen, op): - result = funcgen.expr(op.result) - return '%s = stm_enter_callback_call();' % (result,) +##def stm_enter_callback_call(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = stm_enter_callback_call();' % (result,) -def stm_leave_callback_call(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - return 'stm_leave_callback_call(%s);' % (arg0,) +##def stm_leave_callback_call(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## return 'stm_leave_callback_call(%s);' % (arg0,) -def stm_abort_and_retry(funcgen, op): - return 'stm_abort_and_retry();' +##def stm_abort_and_retry(funcgen, op): +## return 'stm_abort_and_retry();' -def stm_abort_info_push(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) - return 'stm_abort_info_push((gcptr)%s, %s);' % (arg0, arg1) +##def stm_abort_info_push(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## arg1 = funcgen.expr(op.args[1]) +## return 'stm_abort_info_push((gcptr)%s, %s);' % (arg0, arg1) -def stm_abort_info_pop(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - return 'stm_abort_info_pop(%s);' % (arg0,) +##def stm_abort_info_pop(funcgen, op): +## arg0 = funcgen.expr(op.args[0]) +## return 'stm_abort_info_pop(%s);' % (arg0,) -def stm_inspect_abort_info(funcgen, op): - result = funcgen.expr(op.result) - return '%s = stm_inspect_abort_info();' % (result,) +##def stm_inspect_abort_info(funcgen, op): +## result = funcgen.expr(op.result) +## return '%s = stm_inspect_abort_info();' % (result,) -def stm_minor_collect(funcgen, op): - return 'stm_minor_collect();' +##def stm_minor_collect(funcgen, op): +## return 'stm_minor_collect();' -def stm_major_collect(funcgen, op): - return 'stm_major_collect();' - -def stm_can_move(funcop, op): - arg0 = funcgen.expr(op.args[0]) - result = funcgen.expr(op.result) - return '%s = stm_can_move(%s);' % (result, arg0) - - -def op_stm(funcgen, op): - func = globals()[op.opname] - return func(funcgen, op) +##def stm_major_collect(funcgen, op): +## return 'stm_major_collect();' diff --git a/rpython/translator/stm/stmgcintf.py b/rpython/translator/stm/stmgcintf.py --- a/rpython/translator/stm/stmgcintf.py +++ b/rpython/translator/stm/stmgcintf.py @@ -11,6 +11,8 @@ #include "src_stm/stmgc.h" +__thread struct stm_thread_local_s stm_thread_local; + extern Signed pypy_stmcb_size(void*); extern void pypy_stmcb_trace(void*, void(*)(void*)); @@ -29,6 +31,8 @@ include_dirs = [cdir, cdir2], includes = ['src_stm/stmgc.h'], pre_include_bits = ['#define RPY_STM 1'], + post_include_bits = [ + 'extern __thread struct stm_thread_local_s stm_thread_local;'], separate_module_sources = [separate_source], ) From noreply at buildbot.pypy.org Sat Mar 8 12:14:01 2014 From: noreply at buildbot.pypy.org (arigo) 
Date: Sat, 8 Mar 2014 12:14:01 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixes. Now we enter the land of clang crashes. Message-ID: <20140308111401.95C781C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69797:f380d1adebbc Date: 2014-03-08 12:13 +0100 http://bitbucket.org/pypy/pypy/changeset/f380d1adebbc/ Log: Fixes. Now we enter the land of clang crashes. diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -772,7 +772,10 @@ print >> f, '\tstruct object_s lib;' print >> f, '\tuint32_t tid;' print >> f, '} rpyobj_t;' - print >> f + print >> f, 'typedef TLPREFIX char rpygcchar_t;' + else: + print >> f, 'typedef char rpygcchar_t;' + print >> f for node in structdeflist: if hasattr(node, 'forward_decl'): if node.forward_decl: diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -237,7 +237,7 @@ Bool: 'bool_t @', Void: 'void @', Address: 'void* @', - GCREF: 'rpy_gc_char *@', + GCREF: 'rpygcchar_t *@', } def define_c_primitive(ll_type, c_name, suffix=''): diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -12,13 +12,6 @@ #ifdef RPY_STM -typedef stm_char rpy_gc_char; -#else -typedef char rpy_gc_char; -#endif - - -#ifdef RPY_STM void _pypy_stm_free(void *); #define _OP_RAW_MALLOCED(r) stm_call_on_abort(r, _pypy_stm_free) #define _OP_RAW_STM_UNREGISTER(r) stm_call_on_abort(r, NULL) diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -61,8 +61,9 @@ arg_size = funcgen.expr(op.args[0]) arg_type_id = funcgen.expr(op.args[1]) result = funcgen.expr(op.result) - return ('%s = stm_allocate(%s); ' % (result, arg_size) + - '((rpyobj_t *)%s)->type_id = %s;' % (result, arg_type_id)) + # XXX NULL returns? + return ('%s = (rpygcchar_t *)stm_allocate(%s); ' % (result, arg_size) + + '((rpyobj_t *)%s)->tid = %s;' % (result, arg_type_id)) def stm_get_from_obj(funcgen, op): assert op.args[0].concretetype == llmemory.GCREF @@ -70,7 +71,8 @@ arg_ofs = funcgen.expr(op.args[1]) result = funcgen.expr(op.result) resulttype = cdecl(funcgen.lltypename(op.result), '') - return '%s = *(%s *)(%s + %s);' % (result, resulttype, arg_obj, arg_ofs) + return '%s = *(TLPREFIX %s *)(%s + %s);' % ( + result, resulttype, arg_obj, arg_ofs) stm_get_from_obj_const = stm_get_from_obj @@ -80,7 +82,8 @@ arg_ofs = funcgen.expr(op.args[1]) arg_val = funcgen.expr(op.args[2]) valtype = cdecl(funcgen.lltypename(op.args[2]), '') - return '*(%s *)(%s + %s) = %s;' % (valtype, arg_obj, arg_ofs, arg_val) + return '*(TLPREFIX %s *)(%s + %s) = %s;' % ( + valtype, arg_obj, arg_ofs, arg_val) def stm_collect(funcgen, op): arg0 = funcgen.expr(op.args[0]) @@ -97,7 +100,7 @@ def stm_addr_get_tid(funcgen, op): arg0 = funcgen.expr(op.args[0]) result = funcgen.expr(op.result) - return '%s = ((struct rpyobj_s *)%s)->type_id;' % (result, arg0) + return '%s = ((struct rpyobj_s *)%s)->tid;' % (result, arg0) def stm_become_inevitable(funcgen, op): try: From noreply at buildbot.pypy.org Sat Mar 8 12:31:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 12:31:19 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: The code is invalid (but clang doesn't complain). 
Add a clear exception, which crashes now. Message-ID: <20140308113119.C48CF1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69798:b5683d493a95 Date: 2014-03-08 12:30 +0100 http://bitbucket.org/pypy/pypy/changeset/b5683d493a95/ Log: The code is invalid (but clang doesn't complain). Add a clear exception, which crashes now. diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -645,6 +645,17 @@ def OP_CAST_POINTER(self, op): TYPE = self.lltypemap(op.result) + if self._is_stm(): + TSRC = self.lltypemap(op.args[0]) + gcdst = isinstance(TYPE, Ptr) and TYPE.TO._gckind == 'gc' + gcsrc = isinstance(TSRC, Ptr) and TSRC.TO._gckind == 'gc' + if gcsrc != gcdst: + raise Exception( + "stm: cast between pointer types changes the address space\n" + " func: %s\n" + " op: %s\n" + " from: %s\n" + " to: %s" % (self.graph, op, TSRC, TYPE)) typename = self.db.gettype(TYPE) result = [] result.append('%s = (%s)%s;' % (self.expr(op.result), @@ -654,13 +665,7 @@ OP_CAST_ADR_TO_PTR = OP_CAST_POINTER OP_CAST_OPAQUE_PTR = OP_CAST_POINTER - - def OP_CAST_PTR_TO_ADR(self, op): - #if self.lltypemap(op.args[0]).TO._gckind == 'gc' and self._is_stm(): - # from pypy.translator.c.support import log - # log.WARNING("cast_ptr_to_adr(gcref) might be a bad idea with STM:") - # log.WARNING(" %r" % (self.graph,)) - return self.OP_CAST_POINTER(op) + OP_CAST_PTR_TO_ADR = OP_CAST_POINTER def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) From noreply at buildbot.pypy.org Sat Mar 8 12:45:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 12:45:42 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140308114542.CB7101C0906@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69799:8270fc367b9d Date: 2014-03-08 12:45 +0100 http://bitbucket.org/pypy/pypy/changeset/8270fc367b9d/ Log: in-progress diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -725,6 +725,7 @@ self.pop_roots(hop, livevars) def gct_gc_can_move(self, hop): + assert not self.translator.config.translation.stm, "XXX" op = hop.spaceop v_addr = hop.genop('cast_ptr_to_adr', [op.args[0]], resulttype=llmemory.Address) @@ -735,6 +736,7 @@ if self.shrink_array_ptr is None: return GCTransformer.gct_shrink_array(self, hop) op = hop.spaceop + assert not self.translator.config.translation.stm, "XXX" v_addr = hop.genop('cast_ptr_to_adr', [op.args[0]], resulttype=llmemory.Address) v_length = op.args[1] @@ -776,6 +778,7 @@ ofs = llmemory.offsetof(self.c_const_gc.concretetype.TO, 'inst_' + attrname) c_ofs = rmodel.inputconst(lltype.Signed, ofs) + assert not self.translator.config.translation.stm, "XXX" v_gc_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gc], resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) @@ -790,6 +793,7 @@ ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, 'inst_' + attrname) c_ofs = rmodel.inputconst(lltype.Signed, ofs) + assert not self.translator.config.translation.stm, "XXX" v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], resulttype=llmemory.Address) hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- 
a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -56,6 +56,11 @@ for var in reversed(livevars): hop.genop("stm_pop_root_into", [var]) + def transform_generic_set(self, hop): + opname = hop.spaceop.opname + # XXX DO STUFF HERE + hop.rename('bare_' + opname) + def gc_header_for(self, obj, needs_hash=False): return self.gcdata.gc.gcheaderbuilder.header_of_object(obj) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -108,6 +108,14 @@ """ # xxx Warning: same note as above apply: don't do this at home assert length >= 0 + + if rgc.stm_is_enabled(): + i = 0 + while i < length: + ptrdst[i] = src.chars[srcstart + i] + i += 1 + return + # from here, no GC operations can happen src = _get_raw_buf(SRC_TP, src, srcstart) adr = llmemory.cast_ptr_to_adr(ptrdst) From noreply at buildbot.pypy.org Sat Mar 8 12:48:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 12:48:56 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140308114856.C5E961C0906@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69800:4bbc728b5079 Date: 2014-03-08 12:48 +0100 http://bitbucket.org/pypy/pypy/changeset/4bbc728b5079/ Log: fix diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -21,7 +21,7 @@ return [] def c_struct_field_name(self, _): - return 'h_tid' + return 'tid' class StmHeader_OpaqueNode(ContainerNode): From noreply at buildbot.pypy.org Sat Mar 8 13:05:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 13:05:16 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: progress! Message-ID: <20140308120516.23E631D274E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69801:4b10a8645bb0 Date: 2014-03-08 13:04 +0100 http://bitbucket.org/pypy/pypy/changeset/4b10a8645bb0/ Log: progress! 
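For orientation before the diff: the stmgc-c7 entry points wired up across these changesets are few. Below is a hedged sketch of how they fit together, using only calls that appear in the diffs in this thread; it is not the actual generated code, a transaction is assumed to be running wherever the library requires one, and the TLPREFIX on the typedef is an assumption (the opening line of the typedef printed by genc.py is not shown here).

/* Hedged sketch, not generated code: the stmgc-c7 calls used by this glue. */
#include <stdint.h>
#include "stmgc.h"

__thread struct stm_thread_local_s stm_thread_local;

typedef TLPREFIX struct rpyobj_s {
    struct object_s lib;        /* stmgc's own object header */
    uint32_t tid;               /* RPython type id */
} rpyobj_t;

/* callbacks the library expects from its client; stmgcintf.c forwards
   them to pypy_stmcb_size() / pypy_stmcb_trace() */
size_t stmcb_size(struct object_s *obj);
void stmcb_trace(struct object_s *obj, void visit(object_t **));

static rpyobj_t *allocate_with_tid(long size, uint32_t type_id)
{
    /* what funcgen.stm_allocate_tid() emits: allocate, then set the tid;
       stmgc.py enforces a 16-byte minimum size */
    rpyobj_t *r = (rpyobj_t *)stm_allocate(size < 16 ? 16 : size);
    r->tid = type_id;
    return r;
}

static void example_thread(void)
{
    stm_setup();                                   /* once per process */
    stm_register_thread_local(&stm_thread_local);  /* once per thread */

    rpyobj_t *obj = allocate_with_tid(32, 42);     /* illustrative values */
    STM_PUSH_ROOT(stm_thread_local, obj);          /* keep alive across...  */
    stm_collect(0);                                /* ...a minor collection */
    STM_POP_ROOT(stm_thread_local, obj);

    if (stm_can_move((object_t *)obj)) {
        /* a future minor collection may still move this object */
    }
    stm_unregister_thread_local(&stm_thread_local);
}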
diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c --- a/rpython/translator/c/src/mem.c +++ b/rpython/translator/c/src/mem.c @@ -45,7 +45,7 @@ static struct pypy_debug_alloc_s *pypy_debug_alloc_list = NULL; #ifdef RPY_STM -# include "src_stm/atomic_ops.h" +// spinlock_acquire/spinlock_release defined in ../../stm/src_stm/stmgcintf.h static revision_t pypy_debug_alloc_lock = 0; #else # define spinlock_acquire(lock, targetvalue) /* nothing */ diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -0,0 +1,17 @@ +#include "src_stm/stmgcintf.h" + +__thread struct stm_thread_local_s stm_thread_local; + +extern Signed pypy_stmcb_size(void*); +extern void pypy_stmcb_trace(void*, void(*)(void*)); + +inline size_t stmcb_size(struct object_s *obj) { + return pypy_stmcb_size(obj); +} + +inline void stmcb_trace(struct object_s *obj, void visit(object_t **)) { + pypy_stmcb_trace(obj, (void(*)(void*))visit); +} + +/* "include" the stmgc.c file here */ +#include "src_stm/stmgc.c" diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -0,0 +1,27 @@ +/* meant to be #included after src_stm/stmgc.h */ + +#include "stmgc.h" +#include "stm/atomic.h" /* for spin_loop() and write_fence() */ + +extern __thread struct stm_thread_local_s stm_thread_local; + + +#if 0 /* fprinting versions */ +# define spinlock_acquire(lock, targetvalue) \ + do { if (__sync_bool_compare_and_swap(&(lock), 0, (targetvalue))) { \ + dprintf(("<<< locked %d\n", (int)targetvalue)); \ + break; \ + } \ + do { spin_loop(); } while (lock); \ + } while (1) +# define spinlock_release(lock) \ + do { dprintf(("unlocked >>>\n")); write_fence(); \ + assert((lock) != 0); (lock) = 0; } while (0) +#else +# define spinlock_acquire(lock, targetvalue) \ + do { if (__sync_bool_compare_and_swap(&(lock), 0, (targetvalue))) break; \ + do { spin_loop(); } while (lock); \ + } while (1) +# define spinlock_release(lock) \ + do { write_fence(); assert((lock) != 0); (lock) = 0; } while (0) +#endif diff --git a/rpython/translator/stm/stmgcintf.py b/rpython/translator/stm/stmgcintf.py --- a/rpython/translator/stm/stmgcintf.py +++ b/rpython/translator/stm/stmgcintf.py @@ -6,33 +6,14 @@ cdir = os.path.abspath(os.path.join(cdir2, '..', 'stm')) -separate_source = ''' -//#define _GC_DEBUG 2 /* XXX move elsewhere */ - -#include "src_stm/stmgc.h" - -__thread struct stm_thread_local_s stm_thread_local; - -extern Signed pypy_stmcb_size(void*); -extern void pypy_stmcb_trace(void*, void(*)(void*)); - -inline size_t stmcb_size(struct object_s *obj) { - return pypy_stmcb_size(obj); -} - -inline void stmcb_trace(struct object_s *obj, void visit(object_t **)) { - pypy_stmcb_trace(obj, (void(*)(void*))visit); -} - -#include "src_stm/stmgc.c" -''' +_f = open(os.path.join(cdir, 'src_stm', 'stmgcintf.c'), 'r') +separate_source = _f.read() +_f.close() eci = ExternalCompilationInfo( include_dirs = [cdir, cdir2], - includes = ['src_stm/stmgc.h'], + includes = ['src_stm/stmgcintf.h'], pre_include_bits = ['#define RPY_STM 1'], - post_include_bits = [ - 'extern __thread struct stm_thread_local_s stm_thread_local;'], separate_module_sources = [separate_source], ) From noreply at buildbot.pypy.org Sat Mar 8 13:20:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 13:20:10 +0100 (CET) 
Subject: [pypy-commit] stmgc default: Small additions for pypy Message-ID: <20140308122010.562AE1D28A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r967:2d15ef133bde Date: 2014-03-08 13:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/2d15ef133bde/ Log: Small additions for pypy diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -1,4 +1,4 @@ -#define _GNU_SOURCE +#define _GNU_SOURCE 1 #include "stmgc.h" #include "stm/atomic.h" #include "stm/list.h" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -199,6 +199,7 @@ transaction. */ #define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) +#define STM_POP_ROOT_RET(tl) (*(--(tl).shadowstack)) /* Every thread needs to have a corresponding stm_thread_local_t From noreply at buildbot.pypy.org Sat Mar 8 13:22:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 13:22:46 +0100 (CET) Subject: [pypy-commit] stmgc default: Protect this header Message-ID: <20140308122246.13AF21D28A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r968:b4a037995423 Date: 2014-03-08 13:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/b4a037995423/ Log: Protect this header diff --git a/c7/stm/atomic.h b/c7/stm/atomic.h --- a/c7/stm/atomic.h +++ b/c7/stm/atomic.h @@ -1,3 +1,5 @@ +#ifndef _STM_ATOMIC_H +#define _STM_ATOMIC_H /* spin_loop() corresponds to the PAUSE instruction on x86. On other architectures, we generate no instruction (but still need @@ -32,3 +34,6 @@ static inline void write_fence(void) { __sync_synchronize(); } #endif + + +#endif /* _STM_ATOMIC_H */ From noreply at buildbot.pypy.org Sat Mar 8 13:23:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 13:23:43 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140308122343.09EAD1D28A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69802:af15056321f7 Date: 2014-03-08 13:20 +0100 http://bitbucket.org/pypy/pypy/changeset/af15056321f7/ Log: in-progress diff --git a/rpython/translator/c/src/debug_print.c b/rpython/translator/c/src/debug_print.c --- a/rpython/translator/c/src/debug_print.c +++ b/rpython/translator/c/src/debug_print.c @@ -162,18 +162,17 @@ } #ifdef RPY_STM -# include -# include +# include +# define bool_cas __sync_bool_compare_and_swap #else - typedef long revision_t; # define bool_cas(vp, o, n) (*(vp)=(n), 1) # define dprintfcolor() 0 #endif -static revision_t threadcounter = 0; +static Signed threadcounter = 0; static void _prepare_display_colors(void) { - revision_t counter; + Signed counter; char *p; while (1) { counter = threadcounter; diff --git a/rpython/translator/c/src/g_prerequisite.h b/rpython/translator/c/src/g_prerequisite.h --- a/rpython/translator/c/src/g_prerequisite.h +++ b/rpython/translator/c/src/g_prerequisite.h @@ -21,3 +21,10 @@ # define RPY_LENGTH0 1 /* array decl [0] are bad */ # define RPY_DUMMY_VARLENGTH /* nothing */ #endif + + +#ifdef RPY_STM +#define rpy_duck() asm("":::"memory") // work around an llvm bug :-/ +#else +#define rpy_duck() /* nothing */ +#endif diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c --- a/rpython/translator/c/src/mem.c +++ b/rpython/translator/c/src/mem.c @@ -46,7 +46,7 @@ #ifdef RPY_STM // spinlock_acquire/spinlock_release defined in ../../stm/src_stm/stmgcintf.h -static revision_t pypy_debug_alloc_lock = 0; +static Signed pypy_debug_alloc_lock = 0; 
#else # define spinlock_acquire(lock, targetvalue) /* nothing */ # define spinlock_release(lock) /* nothing */ diff --git a/rpython/translator/c/src/rtyper.c b/rpython/translator/c/src/rtyper.c --- a/rpython/translator/c/src/rtyper.c +++ b/rpython/translator/c/src/rtyper.c @@ -22,17 +22,18 @@ char *RPyString_AsCharP(RPyString *rps) { -#ifdef RPY_STM - rps = (RPyString *)stm_read_barrier((gcptr)rps); -#endif - Signed len = RPyString_Size(rps); + Signed i, len = RPyString_Size(rps); struct _RPyString_dump_t *dump = \ malloc(sizeof(struct _RPyString_dump_t) + len); if (!dump) return "(out of memory!)"; dump->next = _RPyString_dump; _RPyString_dump = dump; - memcpy(dump->data, rps->rs_chars.items, len); + /* can't use memcpy() in case of stm */ + for (i = 0; i < len; i++) { + dump->data[i] = rps->rs_chars.items[i]; + rpy_duck(); + } dump->data[len] = 0; return dump->data; } @@ -48,8 +49,12 @@ RPyString *RPyString_FromString(char *buf) { - int length = strlen(buf); + int i, length = strlen(buf); RPyString *rps = RPyString_New(length); - memcpy(rps->rs_chars.items, buf, length); + /* can't use memcpy() in case of stm */ + for (i = 0; i < length; i++) { + rps->rs_chars.items[i] = buf[i]; + rpy_duck(); + } return rps; } diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -1,4 +1,5 @@ -#include "src_stm/stmgcintf.h" +/* This is not meant to be compiled stand-alone, but with all + of PyPy's #defines and #includes prepended. */ __thread struct stm_thread_local_s stm_thread_local; diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -1,3 +1,7 @@ +#ifndef _RPY_STMGCINTF_H +#define _RPY_STMGCINTF_H + + /* meant to be #included after src_stm/stmgc.h */ #include "stmgc.h" @@ -25,3 +29,6 @@ # define spinlock_release(lock) \ do { write_fence(); assert((lock) != 0); (lock) = 0; } while (0) #endif + + +#endif /* _RPY_STMGCINTF_H */ From noreply at buildbot.pypy.org Sat Mar 8 13:23:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 13:23:44 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/2d15ef133bde Message-ID: <20140308122344.3CC0D1D28A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69803:fa8667037de4 Date: 2014-03-08 13:20 +0100 http://bitbucket.org/pypy/pypy/changeset/fa8667037de4/ Log: import stmgc/2d15ef133bde diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -d0f79129cbb7 +2d15ef133bde diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -1,5 +1,5 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -#define _GNU_SOURCE +#define _GNU_SOURCE 1 #include "stmgc.h" #include "stm/atomic.h" #include "stm/list.h" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -200,6 +200,7 @@ transaction. 
*/ #define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) +#define STM_POP_ROOT_RET(tl) (*(--(tl).shadowstack)) /* Every thread needs to have a corresponding stm_thread_local_t From noreply at buildbot.pypy.org Sat Mar 8 13:23:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Mar 2014 13:23:45 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/b4a037995423 Message-ID: <20140308122345.6627E1D28A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69804:fbe3b73746df Date: 2014-03-08 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/fbe3b73746df/ Log: import stmgc/b4a037995423 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -2d15ef133bde +b4a037995423 diff --git a/rpython/translator/stm/src_stm/stm/atomic.h b/rpython/translator/stm/src_stm/stm/atomic.h --- a/rpython/translator/stm/src_stm/stm/atomic.h +++ b/rpython/translator/stm/src_stm/stm/atomic.h @@ -1,4 +1,6 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _STM_ATOMIC_H +#define _STM_ATOMIC_H /* spin_loop() corresponds to the PAUSE instruction on x86. On other architectures, we generate no instruction (but still need @@ -33,3 +35,6 @@ static inline void write_fence(void) { __sync_synchronize(); } #endif + + +#endif /* _STM_ATOMIC_H */ From noreply at buildbot.pypy.org Sat Mar 8 21:06:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 8 Mar 2014 21:06:39 +0100 (CET) Subject: [pypy-commit] pypy default: work around crash in getaddrinfo on osx (cpython issue17269) Message-ID: <20140308200639.DD49B1C0907@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69805:7f520002bb83 Date: 2014-03-08 12:05 -0800 http://bitbucket.org/pypy/pypy/changeset/7f520002bb83/ Log: work around crash in getaddrinfo on osx (cpython issue17269) diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -1148,6 +1148,9 @@ address_to_fill=None): # port_or_service is a string, not an int (but try str(port_number)). 
assert port_or_service is None or isinstance(port_or_service, str) + if _c._MACOSX: + if port_or_service is None or port_or_service == '0': + port_or_service = '00' hints = lltype.malloc(_c.addrinfo, flavor='raw', zero=True) rffi.setintfield(hints, 'c_ai_family', family) rffi.setintfield(hints, 'c_ai_socktype', socktype) diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -328,6 +328,11 @@ found = True assert found, lst +def test_getaddrinfo_osx_crash(): + # see CPython issue17269 + for port in [None, '0', '00']: + getaddrinfo('localhost', port, 0, 0, 0, AI_NUMERICSERV) + def test_connect_ex(): s = RSocket() err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work From noreply at buildbot.pypy.org Sat Mar 8 21:38:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 8 Mar 2014 21:38:12 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_sqlite3 for older sqlite versions Message-ID: <20140308203812.695B01C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69806:d6ca8475ef71 Date: 2014-03-08 12:37 -0800 http://bitbucket.org/pypy/pypy/changeset/d6ca8475ef71/ Log: fix test_sqlite3 for older sqlite versions diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -236,8 +236,14 @@ return 42 con.set_authorizer(authorizer_cb) with pytest.raises(_sqlite3.OperationalError) as e: - con.execute('select 42') - assert str(e.value) == 'authorizer malfunction' + con.execute('select 123') + major, minor, micro = _sqlite3.sqlite_version.split('.')[:3] + if (int(major), int(minor), int(micro)) >= (3, 6, 14): + assert str(e.value) == 'authorizer malfunction' + else: + assert str(e.value) == \ + ("illegal return value (1) from the authorization function - " + "should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY") def test_issue1573(con): From noreply at buildbot.pypy.org Sat Mar 8 22:48:04 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 8 Mar 2014 22:48:04 +0100 (CET) Subject: [pypy-commit] pypy default: win32 cleanup Message-ID: <20140308214804.C35F01C01F0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69807:032d052a1a9f Date: 2014-03-08 21:45 +0200 http://bitbucket.org/pypy/pypy/changeset/032d052a1a9f/ Log: win32 cleanup diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -322,23 +322,24 @@ # RuntimeWarning, stacklevel=2) argtypes = [] + argsl = list(args) if self._com_index: from ctypes import cast, c_void_p, POINTER - if not args: + if not argsl: raise ValueError( "native COM method call without 'this' parameter" ) - thisvalue = args.pop(0) + thisvalue = argsl.pop(0) thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, args, kwargs)) - args.insert(0, thisvalue) + self._convert_args(argtypes, argsl, kwargs)) + argsl.insert(0, thisvalue) newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, args, kwargs)) + self._convert_args(argtypes, argsl, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) diff --git 
a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -36,11 +36,13 @@ return self._value lib_m = 'm' +has_sinf = True if sys.platform == 'win32': #there is a small chance this fails on Mingw via environ $CC import distutils.ccompiler if distutils.ccompiler.get_default_compiler() == 'msvc': lib_m = 'msvcrt' + has_sinf = False class TestFunction(object): Backend = CTypesBackend @@ -55,6 +57,8 @@ assert x == math.sin(1.23) def test_sinf(self): + if not has_sinf: + py.test.skip("sinf not available") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); From noreply at buildbot.pypy.org Sat Mar 8 22:48:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 8 Mar 2014 22:48:06 +0100 (CET) Subject: [pypy-commit] pypy default: win32 - use test_support.rmtree to prevent race conditions, fix open() calls Message-ID: <20140308214806.012CD1C01F0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69808:0e3a28693bc6 Date: 2014-03-08 23:30 +0200 http://bitbucket.org/pypy/pypy/changeset/0e3a28693bc6/ Log: win32 - use test_support.rmtree to prevent race conditions, fix open() calls diff --git a/lib-python/2.7/test/test_zipfile.py b/lib-python/2.7/test/test_zipfile.py --- a/lib-python/2.7/test/test_zipfile.py +++ b/lib-python/2.7/test/test_zipfile.py @@ -19,7 +19,7 @@ from unittest import skipUnless from test.test_support import TESTFN, TESTFN_UNICODE, TESTFN_ENCODING, \ - run_unittest, findfile, unlink + run_unittest, findfile, unlink, rmtree try: TESTFN_UNICODE.encode(TESTFN_ENCODING) except (UnicodeError, TypeError): @@ -365,7 +365,8 @@ produces the expected result.""" with zipfile.ZipFile(TESTFN2, "w") as zipfp: zipfp.write(TESTFN) - self.assertEqual(zipfp.read(TESTFN), open(TESTFN).read()) + with open(TESTFN,'r') as fid: + self.assertEqual(zipfp.read(TESTFN), fid.read()) @skipUnless(zlib, "requires zlib") def test_per_file_compression(self): @@ -404,11 +405,12 @@ self.assertEqual(writtenfile, correctfile) # make sure correct data is in correct file - self.assertEqual(fdata, open(writtenfile, "rb").read()) + with open(writtenfile, "rb") as fid: + self.assertEqual(fdata, fid.read()) os.remove(writtenfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def test_extract_all(self): with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp: @@ -419,12 +421,13 @@ zipfp.extractall() for fpath, fdata in SMALL_TEST_DATA: outfile = os.path.join(os.getcwd(), fpath) - - self.assertEqual(fdata, open(outfile, "rb").read()) + + with open(outfile, "rb") as fid: + self.assertEqual(fdata, fid.read()) os.remove(outfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def check_file(self, filename, content): self.assertTrue(os.path.isfile(filename)) @@ -509,12 +512,12 @@ self.assertEqual(writtenfile, correctfile, msg="extract %r" % arcname) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') with zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall(targetpath) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') correctfile = os.path.join(os.getcwd(), *fixedname.split('/')) @@ -523,12 +526,12 @@ self.assertEqual(writtenfile, correctfile, 
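One note on the test_zipfile.py changes in this patch: PyPy has no reference counting, so a file object that merely goes out of scope can stay open until the GC runs, and on Windows that open handle makes a later unlink() or shutil.rmtree() fail. Hence every bare open(...).read() becomes a with-block and directory cleanup goes through test_support.rmtree. A condensed, illustrative sketch of the pattern (not code from the patch):

# Illustration only; TESTFN, unlink and rmtree come from test.test_support,
# exactly as imported at the top of the patched test file.
from test.test_support import TESTFN, unlink, rmtree

def read_back_refcount_style():
    return open(TESTFN).read()          # handle may linger until a GC cycle

def read_back_gc_friendly():
    with open(TESTFN, "rb") as fid:     # closed deterministically on exit
        return fid.read()

def cleanup():
    unlink(TESTFN)                      # no longer blocked by an open handle
    rmtree('ziptest2dir')               # helper from test_support (per the log,
                                        # avoids the Windows removal races)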
msg="extract %r" % arcname) self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) with zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall() self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) os.remove(TESTFN2) @@ -593,6 +596,8 @@ def tearDown(self): unlink(TESTFN) unlink(TESTFN2) + if os.path.exists(TESTFN): + os.remove(TESTFN) class TestZip64InSmallFiles(unittest.TestCase): @@ -712,6 +717,12 @@ class PyZipFileTests(unittest.TestCase): + def teardown(self): + if os.path.exists(TESTFN): + os.remove(TESTFN) + if os.path.exists(TESTFN2): + os.remove(TESTFN2) + def test_write_pyfile(self): with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp: fn = __file__ @@ -773,11 +784,14 @@ self.assertNotIn('mod2.txt', names) finally: - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) def test_write_non_pyfile(self): + if os.path.exists(TESTFN): + os.remove(TESTFN) with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp: - open(TESTFN, 'w').write('most definitely not a python file') + with open(TESTFN, 'w') as fid: + fid.write('most definitely not a python file') self.assertRaises(RuntimeError, zipfp.writepy, TESTFN) os.remove(TESTFN) @@ -940,8 +954,9 @@ self.assertRaises(RuntimeError, zipf.open, "foo.txt") self.assertRaises(RuntimeError, zipf.testzip) self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus") - open(TESTFN, 'w').write('zipfile test data') - self.assertRaises(RuntimeError, zipf.write, TESTFN) + with open(TESTFN, 'w') as fid: + fid.write('zipfile test data') + self.assertRaises(RuntimeError, zipf.write, TESTFN) def test_bad_constructor_mode(self): """Check that bad modes passed to ZipFile constructor are caught.""" @@ -1126,6 +1141,7 @@ pass try: zipf = zipfile.ZipFile(TESTFN, mode="r") + zipf.close() except zipfile.BadZipfile: self.fail("Unable to create empty ZIP file in 'w' mode") @@ -1133,6 +1149,7 @@ pass try: zipf = zipfile.ZipFile(TESTFN, mode="r") + zipf.close() except: self.fail("Unable to create empty ZIP file in 'a' mode") @@ -1151,6 +1168,8 @@ def tearDown(self): unlink(TESTFN) unlink(TESTFN2) + if os.path.exists(TESTFN): + os.remove(TESTFN) class DecryptionTests(unittest.TestCase): @@ -1201,16 +1220,28 @@ def test_bad_password(self): self.zip.setpassword("perl") - self.assertRaises(RuntimeError, self.zip.read, "test.txt") + try: + self.assertRaises(RuntimeError, self.zip.read, "test.txt") + finally: + self.zip.close() self.zip2.setpassword("perl") - self.assertRaises(RuntimeError, self.zip2.read, "zero") + try: + self.assertRaises(RuntimeError, self.zip2.read, "zero") + finally: + self.zip2.close() @skipUnless(zlib, "requires zlib") def test_good_password(self): self.zip.setpassword("python") - self.assertEqual(self.zip.read("test.txt"), self.plain) + try: + self.assertEqual(self.zip.read("test.txt"), self.plain) + finally: + self.zip.close() self.zip2.setpassword("12345") - self.assertEqual(self.zip2.read("zero"), self.plain2) + try: + self.assertEqual(self.zip2.read("zero"), self.plain2) + finally: + self.zip2.close() class TestsWithRandomBinaryFiles(unittest.TestCase): @@ -1224,8 +1255,10 @@ fp.write(self.data) def tearDown(self): - unlink(TESTFN) - unlink(TESTFN2) + if os.path.exists(TESTFN): + os.remove(TESTFN) + if os.path.exists(TESTFN2): + os.remove(TESTFN2) def make_test_archive(self, f, compression): # Create the ZIP archive @@ -1329,12 +1362,11 @@ # Verify that (when the ZipFile is in control of creating file objects) # multiple open() calls 
can be made without interfering with each other. with zipfile.ZipFile(TESTFN2, mode="r") as zipf: - zopen1 = zipf.open('ones') - zopen2 = zipf.open('ones') - data1 = zopen1.read(500) - data2 = zopen2.read(500) - data1 += zopen1.read(500) - data2 += zopen2.read(500) + with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2: + data1 = zopen1.read(500) + data2 = zopen2.read(500) + data1 += zopen1.read(500) + data2 += zopen2.read(500) self.assertEqual(data1, data2) def test_different_file(self): @@ -1394,14 +1426,14 @@ def test_store_dir(self): os.mkdir(os.path.join(TESTFN2, "x")) - zipf = zipfile.ZipFile(TESTFN, "w") - zipf.write(os.path.join(TESTFN2, "x"), "x") - self.assertTrue(zipf.filelist[0].filename.endswith("x/")) + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.write(os.path.join(TESTFN2, "x"), "x") + self.assertTrue(zipf.filelist[0].filename.endswith("x/")) def tearDown(self): - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) if os.path.exists(TESTFN): - unlink(TESTFN) + os.remove(TESTFN) class UniversalNewlineTests(unittest.TestCase): @@ -1413,7 +1445,8 @@ for n, s in enumerate(self.seps): self.arcdata[s] = s.join(self.line_gen) + s self.arcfiles[s] = '%s-%d' % (TESTFN, n) - open(self.arcfiles[s], "wb").write(self.arcdata[s]) + with open(self.arcfiles[s], "wb") as fid: + fid.write(self.arcdata[s]) def make_test_archive(self, f, compression): # Create the ZIP archive @@ -1482,8 +1515,9 @@ # Read the ZIP archive with zipfile.ZipFile(f, "r") as zipfp: for sep, fn in self.arcfiles.items(): - for line, zipline in zip(self.line_gen, zipfp.open(fn, "rU")): - self.assertEqual(zipline, line + '\n') + with zipfp.open(fn, "rU") as fid: + for line, zipline in zip(self.line_gen, fid): + self.assertEqual(zipline, line + '\n') def test_read_stored(self): for f in (TESTFN2, TemporaryFile(), StringIO()): From noreply at buildbot.pypy.org Sat Mar 8 23:06:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 8 Mar 2014 23:06:07 +0100 (CET) Subject: [pypy-commit] pypy default: simplify -- why pop and then insert again? Message-ID: <20140308220607.855291C03B3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69809:c5deffb57e50 Date: 2014-03-08 17:05 -0500 http://bitbucket.org/pypy/pypy/changeset/c5deffb57e50/ Log: simplify -- why pop and then insert again? 
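(A rough standalone sketch of the pattern this changeset switches to, shown here only for illustration; the function name dispatch_com_call and the convert callback are hypothetical stand-ins and not part of the actual _ctypes code. Instead of copying the argument tuple and doing pop(0)/insert(0, ...), the 'this' pointer is read by index and the remaining arguments are passed as a slice:)

    # Hypothetical sketch, not the real _ctypes internals: read 'this' by
    # index and slice off the rest, leaving the caller's tuple untouched.
    def dispatch_com_call(args, convert):
        if not args:
            raise ValueError("native COM method call without 'this' parameter")
        thisvalue = args[0]            # no args.pop(0) on a copied list
        newargs = convert(args[1:])    # convert only the remaining arguments
        newargs.insert(0, thisvalue)   # prepend 'this' to the converted list
        return newargs

    # Example:
    #   dispatch_com_call(("this_ptr", 1, 2), lambda rest: list(rest))
    #   -> ['this_ptr', 1, 2]

(The real method additionally threads argtypes, keepalives, outargs and errcheckargs through _convert_args, as the diff below shows.)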
diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -322,24 +322,22 @@ # RuntimeWarning, stacklevel=2) argtypes = [] - argsl = list(args) if self._com_index: from ctypes import cast, c_void_p, POINTER - if not argsl: + if not args: raise ValueError( "native COM method call without 'this' parameter" ) - thisvalue = argsl.pop(0) + thisvalue = args[0] thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, argsl, kwargs)) - argsl.insert(0, thisvalue) + self._convert_args(argtypes, args[1:], kwargs)) newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, argsl, kwargs)) + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) From noreply at buildbot.pypy.org Sun Mar 9 03:09:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 9 Mar 2014 03:09:30 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default (ac3ce8b66c72) Message-ID: <20140309020930.0F8F61C01F0@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69810:eae61b54e4fd Date: 2014-03-07 12:16 -0800 http://bitbucket.org/pypy/pypy/changeset/eae61b54e4fd/ Log: merge default (ac3ce8b66c72) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.8")', + '__version__': 'space.wrap("0.8.2")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3175,6 +3175,8 @@ assert alignof(BStruct) == 1 def test_packed_with_bitfields(): + if sys.platform == "win32": + py.test.skip("testing gcc behavior") BLong = new_primitive_type("long") BChar = new_primitive_type("char") BStruct = new_struct_type("struct foo") @@ -3186,4 +3188,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8" + assert __version__ == "0.8.2" diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -275,6 +275,8 @@ self.make_server() + self.make_server() + self.sock.listen(1) s2 = socket.socket() _thread.start_new_thread(s2.connect, (self.sockaddress,)) From noreply at buildbot.pypy.org Sun Mar 9 03:09:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 9 Mar 2014 03:09:35 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: merge default w/ stdlib-2.7.6 Message-ID: <20140309020935.EAE851C01F0@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69811:bbf0051fb3d8 Date: 2014-03-08 18:08 -0800 http://bitbucket.org/pypy/pypy/changeset/bbf0051fb3d8/ Log: merge default w/ stdlib-2.7.6 diff too long, truncating to 2000 out of 44245 lines diff --git a/lib-python/2.7/BaseHTTPServer.py b/lib-python/2.7/BaseHTTPServer.py --- a/lib-python/2.7/BaseHTTPServer.py +++ b/lib-python/2.7/BaseHTTPServer.py @@ -447,13 +447,13 @@ 
specified as subsequent arguments (it's just like printf!). - The client host and current date/time are prefixed to - every message. + The client ip address and current date/time are prefixed to every + message. """ sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), + (self.client_address[0], self.log_date_time_string(), format%args)) diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,9 +84,11 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - splitpath = _url_collapse_path_split(self.path) - if splitpath[0] in self.cgi_directories: - self.cgi_info = splitpath + collapsed_path = _url_collapse_path(self.path) + dir_sep = collapsed_path.find('/', 1) + head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] + if head in self.cgi_directories: + self.cgi_info = head, tail return True return False @@ -103,18 +105,17 @@ def run_cgi(self): """Execute a CGI script.""" - path = self.path dir, rest = self.cgi_info - i = path.find('/', len(dir) + 1) + i = rest.find('/') while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] + nextdir = rest[:i] + nextrest = rest[i+1:] scriptdir = self.translate_path(nextdir) if os.path.isdir(scriptdir): dir, rest = nextdir, nextrest - i = path.find('/', len(dir) + 1) + i = rest.find('/') else: break @@ -298,44 +299,46 @@ self.log_message("CGI script exited OK") -# TODO(gregory.p.smith): Move this into an appropriate library. -def _url_collapse_path_split(path): +def _url_collapse_path(path): """ Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references. + any '..' references and returns a colllapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. + The utility of this function is limited to is_cgi method and helps + preventing some security attacks. Returns: A tuple of (head, tail) where tail is everything after the final / and head is everything before it. Head will always start with a '/' and, if it contains anything else, never have a trailing '/'. Raises: IndexError if too many '..' occur within the path. + """ # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. - path_parts = [] - for part in path.split('/'): - if part == '.': - path_parts.append('') - else: - path_parts.append(part) - # Filter out blank non trailing parts before consuming the '..'. - path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:] + path_parts = path.split('/') + head_parts = [] + for part in path_parts[:-1]: + if part == '..': + head_parts.pop() # IndexError if more '..' 
than prior parts + elif part and part != '.': + head_parts.append( part ) if path_parts: tail_part = path_parts.pop() + if tail_part: + if tail_part == '..': + head_parts.pop() + tail_part = '' + elif tail_part == '.': + tail_part = '' else: tail_part = '' - head_parts = [] - for part in path_parts: - if part == '..': - head_parts.pop() - else: - head_parts.append(part) - if tail_part and tail_part == '..': - head_parts.pop() - tail_part = '' - return ('/' + '/'.join(head_parts), tail_part) + + splitpath = ('/' + '/'.join(head_parts), tail_part) + collapsed_path = "/".join(splitpath) + + return collapsed_path nobody = None diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -238,7 +238,7 @@ # a two-way quoting algorithm. Any non-text character is translated # into a 4 character sequence: a forward-slash followed by the # three-digit octal equivalent of the character. Any '\' or '"' is -# quoted with a preceeding '\' slash. +# quoted with a preceding '\' slash. # # These are taken from RFC2068 and RFC2109. # _LegalChars is the list of chars which don't require "'s @@ -390,7 +390,7 @@ from time import gmtime, time now = time() year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) - return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \ + return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ (weekdayname[wd], day, monthname[month], year, hh, mm, ss) @@ -539,7 +539,7 @@ r"(?P" # Start of group 'val' r'"(?:[^\\"]|\\.)*"' # Any doublequoted string r"|" # or - r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr + r"\w{3},\s[\s\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr r"|" # or ""+ _LegalCharsPatt +"*" # Any word or empty string r")" # End of group 'val' diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,13 +22,13 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*') +tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( - r'[\s/]*((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*' + r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" @@ -289,7 +289,7 @@ match = tagfind.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() - self.lasttag = tag = rawdata[i+1:k].lower() + self.lasttag = tag = match.group(1).lower() while k < endpos: m = attrfind.match(rawdata, k) diff --git a/lib-python/2.7/Queue.py b/lib-python/2.7/Queue.py --- a/lib-python/2.7/Queue.py +++ b/lib-python/2.7/Queue.py @@ -109,7 +109,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until a free slot is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Full exception if no free slot was available within that time. 
Otherwise ('block' is false), put an item on the queue if a free slot is immediately available, else raise the Full exception ('timeout' @@ -125,7 +125,7 @@ while self._qsize() == self.maxsize: self.not_full.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while self._qsize() == self.maxsize: @@ -152,7 +152,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until an item is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Empty exception if no item was available within that time. Otherwise ('block' is false), return an item if one is immediately available, else raise the Empty exception ('timeout' is ignored @@ -167,7 +167,7 @@ while not self._qsize(): self.not_empty.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while not self._qsize(): diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -149,6 +149,8 @@ # abandon query parameters path = path.split('?',1)[0] path = path.split('#',1)[0] + # Don't forget explicit trailing slash when normalizing. Issue17324 + trailing_slash = path.rstrip().endswith('/') path = posixpath.normpath(urllib.unquote(path)) words = path.split('/') words = filter(None, words) @@ -158,6 +160,8 @@ head, word = os.path.split(word) if word in (os.curdir, os.pardir): continue path = os.path.join(path, word) + if trailing_slash: + path += '/' return path def copyfile(self, source, outputfile): diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -1,4 +1,4 @@ -"""Simple XML-RPC Server. +r"""Simple XML-RPC Server. This module can be used to create simple XML-RPC servers by creating a server and either installing functions, a diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -133,6 +133,7 @@ import select import sys import os +import errno try: import threading except ImportError: @@ -147,6 +148,15 @@ "ThreadingUnixStreamServer", "ThreadingUnixDatagramServer"]) +def _eintr_retry(func, *args): + """restart a system call interrupted by EINTR""" + while True: + try: + return func(*args) + except (OSError, select.error) as e: + if e.args[0] != errno.EINTR: + raise + class BaseServer: """Base class for server classes. @@ -222,7 +232,8 @@ # connecting to the socket to wake this up instead of # polling. Polling reduces our responsiveness to a # shutdown request and wastes cpu at all other times. 
- r, w, e = select.select([self], [], [], poll_interval) + r, w, e = _eintr_retry(select.select, [self], [], [], + poll_interval) if self in r: self._handle_request_noblock() finally: @@ -262,7 +273,7 @@ timeout = self.timeout elif self.timeout is not None: timeout = min(timeout, self.timeout) - fd_sets = select.select([self], [], [], timeout) + fd_sets = _eintr_retry(select.select, [self], [], [], timeout) if not fd_sets[0]: self.handle_timeout() return @@ -690,7 +701,12 @@ def finish(self): if not self.wfile.closed: - self.wfile.flush() + try: + self.wfile.flush() + except socket.error: + # An final socket error may have occurred here, such as + # the local error ECONNABORTED. + pass self.wfile.close() self.rfile.close() diff --git a/lib-python/2.7/StringIO.py b/lib-python/2.7/StringIO.py --- a/lib-python/2.7/StringIO.py +++ b/lib-python/2.7/StringIO.py @@ -158,7 +158,7 @@ newpos = self.len else: newpos = i+1 - if length is not None and length > 0: + if length is not None and length >= 0: if self.pos + length < newpos: newpos = self.pos + length r = self.buf[self.pos:newpos] diff --git a/lib-python/2.7/_LWPCookieJar.py b/lib-python/2.7/_LWPCookieJar.py --- a/lib-python/2.7/_LWPCookieJar.py +++ b/lib-python/2.7/_LWPCookieJar.py @@ -48,7 +48,7 @@ class LWPCookieJar(FileCookieJar): """ - The LWPCookieJar saves a sequence of"Set-Cookie3" lines. + The LWPCookieJar saves a sequence of "Set-Cookie3" lines. "Set-Cookie3" is the format used by the libwww-perl libary, not known to be compatible with any browser, but which is easy to read and doesn't lose information about RFC 2965 cookies. @@ -60,7 +60,7 @@ """ def as_lwp_str(self, ignore_discard=True, ignore_expires=True): - """Return cookies as a string of "\n"-separated "Set-Cookie3" headers. + """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers. ignore_discard and ignore_expires: see docstring for FileCookieJar.save diff --git a/lib-python/2.7/__future__.py b/lib-python/2.7/__future__.py --- a/lib-python/2.7/__future__.py +++ b/lib-python/2.7/__future__.py @@ -112,7 +112,7 @@ CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), - (2, 7, 0, "alpha", 0), + (3, 0, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -74,6 +74,7 @@ @abstractmethod def next(self): + 'Return the next item from the iterator. When exhausted, raise StopIteration' raise StopIteration def __iter__(self): @@ -194,6 +195,7 @@ return self._from_iterable(value for value in other if value in self) def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' for value in other: if value in self: return False @@ -259,6 +261,16 @@ class MutableSet(Set): + """A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ @abstractmethod def add(self, value): @@ -333,11 +345,20 @@ class Mapping(Sized, Iterable, Container): + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. 
+ + """ + @abstractmethod def __getitem__(self, key): raise KeyError def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' try: return self[key] except KeyError: @@ -352,23 +373,29 @@ return True def iterkeys(self): + 'D.iterkeys() -> an iterator over the keys of D' return iter(self) def itervalues(self): + 'D.itervalues() -> an iterator over the values of D' for key in self: yield self[key] def iteritems(self): + 'D.iteritems() -> an iterator over the (key, value) items of D' for key in self: yield (key, self[key]) def keys(self): + "D.keys() -> list of D's keys" return list(self) def items(self): + "D.items() -> list of D's (key, value) pairs, as 2-tuples" return [(key, self[key]) for key in self] def values(self): + "D.values() -> list of D's values" return [self[key] for key in self] # Mappings are not hashable by default, but subclasses can change this @@ -443,6 +470,15 @@ class MutableMapping(Mapping): + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + + """ + @abstractmethod def __setitem__(self, key, value): raise KeyError @@ -454,6 +490,9 @@ __marker = object() def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + ''' try: value = self[key] except KeyError: @@ -465,6 +504,9 @@ return value def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' try: key = next(iter(self)) except StopIteration: @@ -474,6 +516,7 @@ return key, value def clear(self): + 'D.clear() -> None. Remove all items from D.' try: while True: self.popitem() @@ -481,6 +524,11 @@ pass def update(*args, **kwds): + ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' if len(args) > 2: raise TypeError("update() takes at most 2 positional " "arguments ({} given)".format(len(args))) @@ -502,6 +550,7 @@ self[key] = value def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' try: return self[key] except KeyError: @@ -546,12 +595,16 @@ yield self[i] def index(self, value): + '''S.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present. + ''' for i, v in enumerate(self): if v == value: return i raise ValueError def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' return sum(1 for v in self if v == value) Sequence.register(tuple) @@ -562,6 +615,13 @@ class MutableSequence(Sequence): + """All the operations on a read-only sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). 
+ + """ + @abstractmethod def __setitem__(self, index, value): raise IndexError @@ -572,26 +632,36 @@ @abstractmethod def insert(self, index, value): + 'S.insert(index, object) -- insert object before index' raise IndexError def append(self, value): + 'S.append(object) -- append object to the end of the sequence' self.insert(len(self), value) def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' n = len(self) for i in range(n//2): self[i], self[n-i-1] = self[n-i-1], self[i] def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' for v in values: self.append(v) def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' v = self[index] del self[index] return v def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + ''' del self[self.index(value)] def __iadd__(self, values): diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py new file mode 100644 --- /dev/null +++ b/lib-python/2.7/_osx_support.py @@ -0,0 +1,494 @@ +"""Shared OS X support functions.""" + +import os +import re +import sys + +__all__ = [ + 'compiler_fixup', + 'customize_config_vars', + 'customize_compiler', + 'get_platform_osx', +] + +# configuration variables that may contain universal build flags, +# like "-arch" or "-isdkroot", that may need customization for +# the user environment +_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', + 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', + 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', + 'PY_CORE_CFLAGS') + +# configuration variables that may contain compiler calls +_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX') + +# prefix added to original configuration variable names +_INITPRE = '_OSX_SUPPORT_INITIAL_' + + +def _find_executable(executable, path=None): + """Tries to find 'executable' in the directories listed in 'path'. + + A string listing directories separated by 'os.pathsep'; defaults to + os.environ['PATH']. Returns the complete filename or None if not found. + """ + if path is None: + path = os.environ['PATH'] + + paths = path.split(os.pathsep) + base, ext = os.path.splitext(executable) + + if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): + executable = executable + '.exe' + + if not os.path.isfile(executable): + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + # the file exists, we have a shot at spawn working + return f + return None + else: + return executable + + +def _read_output(commandstring): + """Output from successful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. + # tempfile is also not available then. 
+ import contextlib + try: + import tempfile + fp = tempfile.NamedTemporaryFile() + except ImportError: + fp = open("/tmp/_osx_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read().strip() if not os.system(cmd) else None + + +def _find_build_tool(toolname): + """Find a build tool on current path or using xcrun""" + return (_find_executable(toolname) + or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) + or '' + ) + +_SYSTEM_VERSION = None + +def _get_system_version(): + """Return the OS X system version as a string""" + # Reading this plist is a documented way to get the system + # version (see the documentation for the Gestalt Manager) + # We avoid using platform.mac_ver to avoid possible bootstrap issues during + # the build of Python itself (distutils is used to build standard library + # extensions). + + global _SYSTEM_VERSION + + if _SYSTEM_VERSION is None: + _SYSTEM_VERSION = '' + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) + finally: + f.close() + if m is not None: + _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + return _SYSTEM_VERSION + +def _remove_original_values(_config_vars): + """Remove original unmodified values for testing""" + # This is needed for higher-level cross-platform tests of get_platform. + for k in list(_config_vars): + if k.startswith(_INITPRE): + del _config_vars[k] + +def _save_modified_value(_config_vars, cv, newvalue): + """Save modified and original unmodified value of configuration var""" + + oldvalue = _config_vars.get(cv, '') + if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars): + _config_vars[_INITPRE + cv] = oldvalue + _config_vars[cv] = newvalue + +def _supports_universal_builds(): + """Returns True if universal builds are supported on this system""" + # As an approximation, we assume that if we are running on 10.4 or above, + # then we are running with an Xcode environment that supports universal + # builds, in particular -isysroot and -arch arguments to the compiler. This + # is in support of allowing 10.4 universal builds to run on 10.3.x systems. + + osx_version = _get_system_version() + if osx_version: + try: + osx_version = tuple(int(i) for i in osx_version.split('.')) + except ValueError: + osx_version = '' + return bool(osx_version >= (10, 4)) if osx_version else False + + +def _find_appropriate_compiler(_config_vars): + """Find appropriate C compiler for extension module builds""" + + # Issue #13590: + # The OSX location for the compiler varies between OSX + # (or rather Xcode) releases. With older releases (up-to 10.5) + # the compiler is in /usr/bin, with newer releases the compiler + # can only be found inside Xcode.app if the "Command Line Tools" + # are not installed. + # + # Futhermore, the compiler that can be used varies between + # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' + # as the compiler, after that 'clang' should be used because + # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that + # miscompiles Python. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + # The CC config var might contain additional arguments. + # Ignore them while searching. 
+ cc = oldcc = _config_vars['CC'].split()[0] + if not _find_executable(cc): + # Compiler is not found on the shell search PATH. + # Now search for clang, first on PATH (if the Command LIne + # Tools have been installed in / or if the user has provided + # another location via CC). If not found, try using xcrun + # to find an uninstalled clang (within a selected Xcode). + + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself (and os.popen is + # implemented on top of subprocess and is therefore not + # usable as well) + + cc = _find_build_tool('clang') + + elif os.path.basename(cc).startswith('gcc'): + # Compiler is GCC, check if it is LLVM-GCC + data = _read_output("'%s' --version" + % (cc.replace("'", "'\"'\"'"),)) + if 'llvm-gcc' in data: + # Found LLVM-GCC, fall back to clang + cc = _find_build_tool('clang') + + if not cc: + raise SystemError( + "Cannot locate working compiler") + + if cc != oldcc: + # Found a replacement compiler. + # Modify config vars using new compiler, if not already explicitly + # overriden by an env variable, preserving additional arguments. + for cv in _COMPILER_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + cv_split = _config_vars[cv].split() + cv_split[0] = cc if cv != 'CXX' else cc + '++' + _save_modified_value(_config_vars, cv, ' '.join(cv_split)) + + return _config_vars + + +def _remove_universal_flags(_config_vars): + """Remove all universal build arguments from config vars""" + + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _remove_unsupported_archs(_config_vars): + """Remove any unsupported archs from config vars""" + # Different Xcode releases support different sets for '-arch' + # flags. In particular, Xcode 4.x no longer supports the + # PPC architectures. + # + # This code automatically removes '-arch ppc' and '-arch ppc64' + # when these are not supported. That makes it possible to + # build extensions on OSX 10.7 and later with the prebuilt + # 32-bit installer on the python.org website. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself + status = os.system( + """echo 'int main{};' | """ + """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null""" + %(_config_vars['CC'].replace("'", "'\"'\"'"),)) + if status: + # The compile failed for some reason. Because of differences + # across Xcode and compiler versions, there is no reliable way + # to be sure why it failed. Assume here it was due to lack of + # PPC support and remove the related '-arch' flags from each + # config variables not explicitly overriden by an environment + # variable. If the error was for some other reason, we hope the + # failure will show up again when trying to compile an extension + # module. 
+ for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _override_all_archs(_config_vars): + """Allow override of all archs with ARCHFLAGS env var""" + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and '-arch' in _config_vars[cv]: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _check_for_unavailable_sdk(_config_vars): + """Remove references to any SDKs not available""" + # If we're on OSX 10.5 or later and the user tries to + # compile an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. This is particularly important with + # the standalone Command Line Tools alternative to a + # full-blown Xcode install since the CLT packages do not + # provide SDKs. If the SDK is not present, it is assumed + # that the header files and dev libs have been installed + # to /usr and /System/Library by either a standalone CLT + # package or the CLT component within Xcode. + cflags = _config_vars.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', cflags) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def compiler_fixup(compiler_so, cc_args): + """ + This function will strip '-isysroot PATH' and '-arch ARCH' from the + compile flags if the user has specified one them in extra_compile_flags. + + This is needed because '-arch ARCH' adds another architecture to the + build, without a way to remove an architecture. Furthermore GCC will + barf if multiple '-isysroot' arguments are present. + """ + stripArch = stripSysroot = False + + compiler_so = list(compiler_so) + + if not _supports_universal_builds(): + # OSX before 10.4.0, these don't support -arch and -isysroot at + # all. + stripArch = stripSysroot = True + else: + stripArch = '-arch' in cc_args + stripSysroot = '-isysroot' in cc_args + + if stripArch or 'ARCHFLAGS' in os.environ: + while True: + try: + index = compiler_so.index('-arch') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + if 'ARCHFLAGS' in os.environ and not stripArch: + # User specified different -arch flags in the environ, + # see also distutils.sysconfig + compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + + if stripSysroot: + while True: + try: + index = compiler_so.index('-isysroot') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + # Check if the SDK that is used during compilation actually exists, + # the universal build requires the usage of a universal SDK and not all + # users have that installed by default. 
+ sysroot = None + if '-isysroot' in cc_args: + idx = cc_args.index('-isysroot') + sysroot = cc_args[idx+1] + elif '-isysroot' in compiler_so: + idx = compiler_so.index('-isysroot') + sysroot = compiler_so[idx+1] + + if sysroot and not os.path.isdir(sysroot): + from distutils import log + log.warn("Compiling with an SDK that doesn't seem to exist: %s", + sysroot) + log.warn("Please check your Xcode installation") + + return compiler_so + + +def customize_config_vars(_config_vars): + """Customize Python build configuration variables. + + Called internally from sysconfig with a mutable mapping + containing name/value pairs parsed from the configured + makefile used to build this interpreter. Returns + the mapping updated as needed to reflect the environment + in which the interpreter is running; in the case of + a Python from a binary installer, the installed + environment may be very different from the build + environment, i.e. different OS levels, different + built tools, different available CPU architectures. + + This customization is performed whenever + distutils.sysconfig.get_config_vars() is first + called. It may be used in environments where no + compilers are present, i.e. when installing pure + Python dists. Customization of compiler paths + and detection of unavailable archs is deferred + until the first extension module build is + requested (in distutils.sysconfig.customize_compiler). + + Currently called from distutils.sysconfig + """ + + if not _supports_universal_builds(): + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + _remove_universal_flags(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + # Remove references to sdks that are not found + _check_for_unavailable_sdk(_config_vars) + + return _config_vars + + +def customize_compiler(_config_vars): + """Customize compiler path and configuration variables. + + This customization is performed when the first + extension module build is requested + in distutils.sysconfig.customize_compiler). + """ + + # Find a compiler to use for extension module builds + _find_appropriate_compiler(_config_vars) + + # Remove ppc arch flags if not supported here + _remove_unsupported_archs(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + return _config_vars + + +def get_platform_osx(_config_vars, osname, release, machine): + """Filter values for get_platform()""" + # called from get_platform() in sysconfig and distutils.util + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + + macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') + macrelease = _get_system_version() or macver + macver = macver or macrelease + + if macver: + release = macver + osname = "macosx" + + # Use the original CFLAGS value, if available, so that we + # return the same machine type for the platform string. + # Otherwise, distutils may consider this a cross-compiling + # case and disallow installs. + cflags = _config_vars.get(_INITPRE+'CFLAGS', + _config_vars.get('CFLAGS', '')) + if ((macrelease + '.') >= '10.4.' 
and + '-arch' in cflags.strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + + machine = 'fat' + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxint >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxint >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return (osname, release, machine) diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -298,7 +298,7 @@ def seek(self, pos, whence=0): """Change stream position. - Change the stream position to byte offset offset. offset is + Change the stream position to byte offset pos. Argument pos is interpreted relative to the position indicated by whence. Values for whence are: @@ -340,8 +340,10 @@ This method has no effect if the file is already closed. """ if not self.__closed: - self.flush() - self.__closed = True + try: + self.flush() + finally: + self.__closed = True def __del__(self): """Destructor. Calls close().""" @@ -883,12 +885,18 @@ return pos def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def writable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True @@ -1451,7 +1459,7 @@ enabled. With this enabled, on input, the lines endings '\n', '\r', or '\r\n' are translated to '\n' before being returned to the caller. Conversely, on output, '\n' is translated to the system - default line seperator, os.linesep. If newline is any other of its + default line separator, os.linesep. If newline is any other of its legal values, that newline becomes the newline when the file is read and it is returned untranslated. On output, '\n' is converted to the newline. @@ -1546,6 +1554,8 @@ return self._buffer def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return self._seekable def readable(self): @@ -1560,8 +1570,10 @@ def close(self): if self.buffer is not None and not self.closed: - self.flush() - self.buffer.close() + try: + self.flush() + finally: + self.buffer.close() @property def closed(self): diff --git a/lib-python/2.7/_strptime.py b/lib-python/2.7/_strptime.py --- a/lib-python/2.7/_strptime.py +++ b/lib-python/2.7/_strptime.py @@ -222,7 +222,7 @@ """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This - prevents the possibility of a match occuring for a value that also + prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). 
@@ -326,7 +326,8 @@ if len(data_string) != found.end(): raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = 1900 + + year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 @@ -425,6 +426,12 @@ else: tz = value break + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian == -1 and week_of_year != -1 and weekday != -1: @@ -446,6 +453,12 @@ day = datetime_result.day if weekday == -1: weekday = datetime_date(year, month, day).weekday() + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. + year = 1900 + return (time.struct_time((year, month, day, hour, minute, second, weekday, julian, tz)), fraction) diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -63,7 +63,7 @@ yield item def __len__(self): - return sum(x() is not None for x in self.data) + return len(self.data) - len(self._pending_removals) def __contains__(self, item): try: @@ -116,36 +116,21 @@ def update(self, other): if self._pending_removals: self._commit_removals() - if isinstance(other, self.__class__): - self.data.update(other.data) - else: - for element in other: - self.add(element) + for element in other: + self.add(element) def __ior__(self, other): self.update(other) return self - # Helper functions for simple delegating methods. - def _apply(self, other, method): - if not isinstance(other, self.__class__): - other = self.__class__(other) - newdata = method(other.data) - newset = self.__class__() - newset.data = newdata + def difference(self, other): + newset = self.copy() + newset.difference_update(other) return newset - - def difference(self, other): - return self._apply(other, self.data.difference) __sub__ = difference def difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.difference_update(ref(item) for item in other) + self.__isub__(other) def __isub__(self, other): if self._pending_removals: self._commit_removals() @@ -156,13 +141,11 @@ return self def intersection(self, other): - return self._apply(other, self.data.intersection) + return self.__class__(item for item in other if item in self) __and__ = intersection def intersection_update(self, other): - if self._pending_removals: - self._commit_removals() - self.data.intersection_update(ref(item) for item in other) + self.__iand__(other) def __iand__(self, other): if self._pending_removals: self._commit_removals() @@ -171,45 +154,48 @@ def issubset(self, other): return self.data.issubset(ref(item) for item in other) - __lt__ = issubset + __le__ = issubset - def __le__(self, other): - return self.data <= set(ref(item) for item in other) + def __lt__(self, other): + return self.data < set(ref(item) for item in other) def issuperset(self, other): return self.data.issuperset(ref(item) for item in other) - __gt__ = issuperset + __ge__ = issuperset - def __ge__(self, other): - return self.data >= set(ref(item) for item in other) + def __gt__(self, other): + return self.data > set(ref(item) for item in other) def __eq__(self, 
other): if not isinstance(other, self.__class__): return NotImplemented return self.data == set(ref(item) for item in other) + def __ne__(self, other): + opposite = self.__eq__(other) + if opposite is NotImplemented: + return NotImplemented + return not opposite + def symmetric_difference(self, other): - return self._apply(other, self.data.symmetric_difference) + newset = self.copy() + newset.symmetric_difference_update(other) + return newset __xor__ = symmetric_difference def symmetric_difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.__ixor__(other) def __ixor__(self, other): if self._pending_removals: self._commit_removals() if self is other: self.data.clear() else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) return self def union(self, other): - return self._apply(other, self.data.union) + return self.__class__(e for s in (self, other) for e in s) __or__ = union def isdisjoint(self, other): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -123,7 +123,7 @@ compression type, and then write audio frames using writeframesraw. When all frames have been written, either call writeframes('') or close() to patch up the sizes in the header. -Marks can be added anytime. If there are any marks, ypu must call +Marks can be added anytime. If there are any marks, you must call close() after all frames have been written. The close() method is called automatically when the class instance is destroyed. @@ -480,31 +480,30 @@ pass else: self._convert = self._adpcm2lin - self._framesize = self._framesize // 4 + self._sampwidth = 2 return # for ULAW and ALAW try Compression Library try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._ulaw2lin - self._framesize = self._framesize // 2 + self._sampwidth = 2 return except ImportError: pass raise Error, 'cannot read compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - self._framesize = self._framesize // 2 - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW - self._framesize = self._framesize // 2 else: raise Error, 'unsupported compression type' self._decomp = cl.OpenDecompressor(scheme) self._convert = self._decomp_data + self._sampwidth = 2 else: self._comptype = 'NONE' self._compname = 'not compressed' @@ -655,7 +654,7 @@ def setcomptype(self, comptype, compname): if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self._comptype = comptype self._compname = compname @@ -675,7 +674,7 @@ nchannels, sampwidth, framerate, nframes, comptype, compname = info if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self.setnchannels(nchannels) self.setsampwidth(sampwidth) @@ -732,22 +731,28 @@ self._patchheader() def 
close(self): - self._ensure_header_written(0) - if self._datawritten & 1: - # quick pad to even size - self._file.write(chr(0)) - self._datawritten = self._datawritten + 1 - self._writemarkers() - if self._nframeswritten != self._nframes or \ - self._datalength != self._datawritten or \ - self._marklength: - self._patchheader() - if self._comp: - self._comp.CloseCompressor() - self._comp = None - # Prevent ref cycles - self._convert = None - self._file.close() + if self._file is None: + return + try: + self._ensure_header_written(0) + if self._datawritten & 1: + # quick pad to even size + self._file.write(chr(0)) + self._datawritten = self._datawritten + 1 + self._writemarkers() + if self._nframeswritten != self._nframes or \ + self._datalength != self._datawritten or \ + self._marklength: + self._patchheader() + if self._comp: + self._comp.CloseCompressor() + self._comp = None + finally: + # Prevent ref cycles + self._convert = None + f = self._file + self._file = None + f.close() # # Internal methods. @@ -798,7 +803,7 @@ try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._lin2ulaw @@ -806,9 +811,9 @@ except ImportError: pass raise Error, 'cannot write compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW else: raise Error, 'unsupported compression type' @@ -861,7 +866,10 @@ _write_short(self._file, self._nchannels) self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) - _write_short(self._file, self._sampwidth * 8) + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): + _write_short(self._file, 8) + else: + _write_short(self._file, self._sampwidth * 8) _write_float(self._file, self._framerate) if self._aifc: self._file.write(self._comptype) @@ -947,23 +955,27 @@ sys.argv.append('/usr/demos/data/audio/bach.aiff') fn = sys.argv[1] f = open(fn, 'r') - print "Reading", fn - print "nchannels =", f.getnchannels() - print "nframes =", f.getnframes() - print "sampwidth =", f.getsampwidth() - print "framerate =", f.getframerate() - print "comptype =", f.getcomptype() - print "compname =", f.getcompname() - if sys.argv[2:]: - gn = sys.argv[2] - print "Writing", gn - g = open(gn, 'w') - g.setparams(f.getparams()) - while 1: - data = f.readframes(1024) - if not data: - break - g.writeframes(data) - g.close() + try: + print "Reading", fn + print "nchannels =", f.getnchannels() + print "nframes =", f.getnframes() + print "sampwidth =", f.getsampwidth() + print "framerate =", f.getframerate() + print "comptype =", f.getcomptype() + print "compname =", f.getcompname() + if sys.argv[2:]: + gn = sys.argv[2] + print "Writing", gn + g = open(gn, 'w') + try: + g.setparams(f.getparams()) + while 1: + data = f.readframes(1024) + if not data: + break + g.writeframes(data) + finally: + g.close() + print "Done." + finally: f.close() - print "Done." diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -740,10 +740,10 @@ - default -- The value to be produced if the option is not specified. - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. 
+ - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate @@ -1692,9 +1692,12 @@ return args def parse_known_args(self, args=None, namespace=None): - # args default to the system args if args is None: + # args default to the system args args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) # default Namespace built from parser defaults if namespace is None: @@ -1705,10 +1708,7 @@ if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: - default = action.default - if isinstance(action.default, basestring): - default = self._get_value(action, default) - setattr(namespace, action.dest, default) + setattr(namespace, action.dest, action.default) # add any parser defaults that aren't present for dest in self._defaults: @@ -1948,12 +1948,23 @@ if positionals: self.error(_('too few arguments')) - # make sure all required actions were present + # make sure all required actions were present, and convert defaults. for action in self._actions: - if action.required: - if action not in seen_actions: + if action not in seen_actions: + if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, basestring) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: @@ -1979,7 +1990,7 @@ for arg_string in arg_strings: # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content @@ -2186,9 +2197,12 @@ # Value conversion methods # ======================== def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' + # for everything but PARSER, REMAINDER args, strip out first '--' if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] + try: + arg_strings.remove('--') + except ValueError: + pass # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: diff --git a/lib-python/2.7/asyncore.py b/lib-python/2.7/asyncore.py --- a/lib-python/2.7/asyncore.py +++ b/lib-python/2.7/asyncore.py @@ -225,6 +225,7 @@ debug = False connected = False accepting = False + connecting = False closing = False addr = None ignore_log_types = frozenset(['warning']) @@ -248,7 +249,7 @@ try: self.addr = sock.getpeername() except socket.error, err: - if err.args[0] == ENOTCONN: + if err.args[0] in (ENOTCONN, EINVAL): # To handle the case where we got an unconnected # socket. 
self.connected = False @@ -342,9 +343,11 @@ def connect(self, address): self.connected = False + self.connecting = True err = self.socket.connect_ex(address) if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ or err == EINVAL and os.name in ('nt', 'ce'): + self.addr = address return if err in (0, EISCONN): self.addr = address @@ -390,7 +393,7 @@ else: return data except socket.error, why: - # winsock sometimes throws ENOTCONN + # winsock sometimes raises ENOTCONN if why.args[0] in _DISCONNECTED: self.handle_close() return '' @@ -400,6 +403,7 @@ def close(self): self.connected = False self.accepting = False + self.connecting = False self.del_channel() try: self.socket.close() @@ -438,7 +442,8 @@ # sockets that are connected self.handle_accept() elif not self.connected: - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_read() else: self.handle_read() @@ -449,6 +454,7 @@ raise socket.error(err, _strerror(err)) self.handle_connect() self.connected = True + self.connecting = False def handle_write_event(self): if self.accepting: @@ -457,12 +463,8 @@ return if not self.connected: - #check for errors - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - raise socket.error(err, _strerror(err)) - - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_write() def handle_expt_event(self): diff --git a/lib-python/2.7/bdb.py b/lib-python/2.7/bdb.py --- a/lib-python/2.7/bdb.py +++ b/lib-python/2.7/bdb.py @@ -24,6 +24,7 @@ self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} + self.frame_returning = None def canonic(self, filename): if filename == "<" + filename[1:-1] + ">": @@ -82,7 +83,11 @@ def dispatch_return(self, frame, arg): if self.stop_here(frame) or frame == self.returnframe: - self.user_return(frame, arg) + try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None if self.quitting: raise BdbQuit return self.trace_dispatch @@ -186,6 +191,14 @@ def set_step(self): """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. + if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch self._set_stopinfo(None, None) def set_next(self, frame): diff --git a/lib-python/2.7/bsddb/__init__.py b/lib-python/2.7/bsddb/__init__.py --- a/lib-python/2.7/bsddb/__init__.py +++ b/lib-python/2.7/bsddb/__init__.py @@ -33,7 +33,7 @@ #---------------------------------------------------------------------- -"""Support for Berkeley DB 4.1 through 4.8 with a simple interface. +"""Support for Berkeley DB 4.3 through 5.3 with a simple interface. For the full featured object oriented interface use the bsddb.db module instead. It mirrors the Oracle Berkeley DB C API. @@ -138,7 +138,7 @@ except _bsddb.DBCursorClosedError: # the database was modified during iteration. abort. pass -# When Python 2.3 not supported in bsddb3, we can change this to "finally" +# When Python 2.4 not supported in bsddb3, we can change this to "finally" except : self._in_iter -= 1 raise @@ -181,7 +181,7 @@ except _bsddb.DBCursorClosedError: # the database was modified during iteration. abort. 
pass -# When Python 2.3 not supported in bsddb3, we can change this to "finally" +# When Python 2.4 not supported in bsddb3, we can change this to "finally" except : self._in_iter -= 1 raise diff --git a/lib-python/2.7/bsddb/dbobj.py b/lib-python/2.7/bsddb/dbobj.py --- a/lib-python/2.7/bsddb/dbobj.py +++ b/lib-python/2.7/bsddb/dbobj.py @@ -30,12 +30,7 @@ import db if sys.version_info < (2, 6) : - try: - from UserDict import DictMixin - except ImportError: - # DictMixin is new in Python 2.3 - class DictMixin: pass - MutableMapping = DictMixin + from UserDict import DictMixin as MutableMapping else : import collections MutableMapping = collections.MutableMapping @@ -196,6 +191,8 @@ return self._cobj.set_bt_compare(*args, **kwargs) def set_cachesize(self, *args, **kwargs): return self._cobj.set_cachesize(*args, **kwargs) + def set_dup_compare(self, *args, **kwargs) : + return self._cobj.set_dup_compare(*args, **kwargs) def set_flags(self, *args, **kwargs): return self._cobj.set_flags(*args, **kwargs) def set_h_ffactor(self, *args, **kwargs): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -43,7 +43,7 @@ if sys.version_info < (2, 6) : import cPickle else : - # When we drop support for python 2.3 and 2.4 + # When we drop support for python 2.4 # we could use: (in 2.5 we need a __future__ statement) # # with warnings.catch_warnings(): @@ -51,7 +51,7 @@ # ... # # We can not use "with" as is, because it would be invalid syntax - # in python 2.3, 2.4 and (with no __future__) 2.5. + # in python 2.4 and (with no __future__) 2.5. # Here we simulate "with" following PEP 343 : import warnings w = warnings.catch_warnings() @@ -65,32 +65,12 @@ w.__exit__() del w -#At version 2.3 cPickle switched to using protocol instead of bin -if sys.version_info >= (2, 3): - HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL -# In python 2.3.*, "cPickle.dumps" accepts no -# named parameters. "pickle.dumps" accepts them, -# so this seems a bug. - if sys.version_info < (2, 4): - def _dumps(object, protocol): - return cPickle.dumps(object, protocol) - else : - def _dumps(object, protocol): - return cPickle.dumps(object, protocol=protocol) - -else: - HIGHEST_PROTOCOL = None - def _dumps(object, protocol): - return cPickle.dumps(object, bin=protocol) - +HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL +def _dumps(object, protocol): + return cPickle.dumps(object, protocol=protocol) if sys.version_info < (2, 6) : - try: - from UserDict import DictMixin - except ImportError: - # DictMixin is new in Python 2.3 - class DictMixin: pass - MutableMapping = DictMixin + from UserDict import DictMixin as MutableMapping else : import collections MutableMapping = collections.MutableMapping diff --git a/lib-python/2.7/bsddb/dbtables.py b/lib-python/2.7/bsddb/dbtables.py --- a/lib-python/2.7/bsddb/dbtables.py +++ b/lib-python/2.7/bsddb/dbtables.py @@ -30,7 +30,7 @@ if sys.version_info < (2, 6) : import cPickle as pickle else : - # When we drop support for python 2.3 and 2.4 + # When we drop support for python 2.4 # we could use: (in 2.5 we need a __future__ statement) # # with warnings.catch_warnings(): @@ -38,7 +38,7 @@ # ... # # We can not use "with" as is, because it would be invalid syntax - # in python 2.3, 2.4 and (with no __future__) 2.5. + # in python 2.4 and (with no __future__) 2.5. 
# Here we simulate "with" following PEP 343 : import warnings w = warnings.catch_warnings() diff --git a/lib-python/2.7/bsddb/test/test_all.py b/lib-python/2.7/bsddb/test/test_all.py --- a/lib-python/2.7/bsddb/test/test_all.py +++ b/lib-python/2.7/bsddb/test/test_all.py @@ -392,10 +392,8 @@ return self._dbenv.get_tmp_dir().decode(charset) def get_data_dirs(self) : - # Have to use a list comprehension and not - # generators, because we are supporting Python 2.3. return tuple( - [i.decode(charset) for i in self._dbenv.get_data_dirs()]) + (i.decode(charset) for i in self._dbenv.get_data_dirs())) class DBSequence_py3k(object) : def __init__(self, db, *args, **kwargs) : @@ -484,6 +482,8 @@ print '-=' * 38 print db.DB_VERSION_STRING print 'bsddb.db.version(): %s' % (db.version(), ) + if db.version() >= (5, 0) : + print 'bsddb.db.full_version(): %s' %repr(db.full_version()) print 'bsddb.db.__version__: %s' % db.__version__ print 'bsddb.db.cvsid: %s' % db.cvsid @@ -528,7 +528,8 @@ # This path can be overriden via "set_test_path_prefix()". import os, os.path -get_new_path.prefix=os.path.join(os.sep,"tmp","z-Berkeley_DB") +get_new_path.prefix=os.path.join(os.environ.get("TMPDIR", + os.path.join(os.sep,"tmp")), "z-Berkeley_DB") get_new_path.num=0 def get_test_path_prefix() : diff --git a/lib-python/2.7/bsddb/test/test_basics.py b/lib-python/2.7/bsddb/test/test_basics.py --- a/lib-python/2.7/bsddb/test/test_basics.py +++ b/lib-python/2.7/bsddb/test/test_basics.py @@ -9,6 +9,7 @@ from pprint import pprint import unittest import time +import sys from test_all import db, test_support, verbose, get_new_environment_path, \ get_new_database_path @@ -44,13 +45,6 @@ _numKeys = 1002 # PRIVATE. NOTE: must be an even value - import sys - if sys.version_info < (2, 4): - def assertTrue(self, expr, msg=None): - self.failUnless(expr,msg=msg) - def assertFalse(self, expr, msg=None): - self.failIf(expr,msg=msg) - def setUp(self): if self.useEnv: self.homeDir=get_new_environment_path() @@ -74,14 +68,13 @@ # create and open the DB self.d = db.DB(self.env) if not self.useEnv : - if db.version() >= (4, 2) : - self.d.set_cachesize(*self.cachesize) - cachesize = self.d.get_cachesize() - self.assertEqual(cachesize[0], self.cachesize[0]) - self.assertEqual(cachesize[2], self.cachesize[2]) - # Berkeley DB expands the cache 25% accounting overhead, - # if the cache is small. - self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1])) + self.d.set_cachesize(*self.cachesize) + cachesize = self.d.get_cachesize() + self.assertEqual(cachesize[0], self.cachesize[0]) + self.assertEqual(cachesize[2], self.cachesize[2]) + # Berkeley DB expands the cache 25% accounting overhead, + # if the cache is small. 
+ self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1])) self.d.set_flags(self.dbsetflags) if self.dbname: self.d.open(self.filename, self.dbname, self.dbtype, @@ -161,7 +154,6 @@ try: d.delete('abcd') except db.DBNotFoundError, val: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_NOTFOUND) else : @@ -184,7 +176,6 @@ try: d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE) except db.DBKeyExistError, val: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_KEYEXIST) else : @@ -338,7 +329,6 @@ rec = c.next() except db.DBNotFoundError, val: if get_raises_error: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_NOTFOUND) else : @@ -363,7 +353,6 @@ rec = c.prev() except db.DBNotFoundError, val: if get_raises_error: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_NOTFOUND) else : @@ -390,7 +379,6 @@ try: n = c.set('bad key') except db.DBNotFoundError, val: - import sys if sys.version_info < (2, 6) : From noreply at buildbot.pypy.org Sun Mar 9 08:29:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 08:29:14 +0100 (CET) Subject: [pypy-commit] pypy default: No-op, but make some standard CPython exports greppable. Message-ID: <20140309072914.A7D301C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69812:c7540026b241 Date: 2014-03-09 08:19 +0100 http://bitbucket.org/pypy/pypy/changeset/c7540026b241/ Log: No-op, but make some standard CPython exports greppable. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -447,6 +447,11 @@ def build_exported_objects(): # Standard exceptions + # PyExc_BaseException, PyExc_Exception, PyExc_ValueError, PyExc_KeyError, + # PyExc_IndexError, PyExc_IOError, PyExc_OSError, PyExc_TypeError, + # PyExc_AttributeError, PyExc_OverflowError, PyExc_ImportError, + # PyExc_NameError, PyExc_MemoryError, PyExc_RuntimeError, + # PyExc_UnicodeEncodeError, PyExc_UnicodeDecodeError, ... 
for exc_name in exceptions.Module.interpleveldefs.keys(): GLOBALS['PyExc_' + exc_name] = ( 'PyTypeObject*', @@ -454,40 +459,41 @@ # Common types with their own struct for cpyname, pypyexpr in { - "Type": "space.w_type", - "String": "space.w_str", - "Unicode": "space.w_unicode", - "BaseString": "space.w_basestring", - "Dict": "space.w_dict", - "Tuple": "space.w_tuple", - "List": "space.w_list", - "Set": "space.w_set", - "FrozenSet": "space.w_frozenset", - "Int": "space.w_int", - "Bool": "space.w_bool", - "Float": "space.w_float", - "Long": "space.w_long", - "Complex": "space.w_complex", - "ByteArray": "space.w_bytearray", - "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", - "Array": "space.gettypeobject(W_NDimArray.typedef)", - "BaseObject": "space.w_object", - 'None': 'space.type(space.w_None)', - 'NotImplemented': 'space.type(space.w_NotImplemented)', - 'Cell': 'space.gettypeobject(Cell.typedef)', - 'Module': 'space.gettypeobject(Module.typedef)', - 'Property': 'space.gettypeobject(W_Property.typedef)', - 'Slice': 'space.gettypeobject(W_SliceObject.typedef)', - 'Class': 'space.gettypeobject(W_ClassObject.typedef)', - 'StaticMethod': 'space.gettypeobject(StaticMethod.typedef)', - 'CFunction': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', - 'WrapperDescr': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' + "PyType_Type": "space.w_type", + "PyString_Type": "space.w_str", + "PyUnicode_Type": "space.w_unicode", + "PyBaseString_Type": "space.w_basestring", + "PyDict_Type": "space.w_dict", + "PyTuple_Type": "space.w_tuple", + "PyList_Type": "space.w_list", + "PySet_Type": "space.w_set", + "PyFrozenSet_Type": "space.w_frozenset", + "PyInt_Type": "space.w_int", + "PyBool_Type": "space.w_bool", + "PyFloat_Type": "space.w_float", + "PyLong_Type": "space.w_long", + "PyComplex_Type": "space.w_complex", + "PyByteArray_Type": "space.w_bytearray", + "PyMemoryView_Type": "space.gettypeobject(W_MemoryView.typedef)", + "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", + "PyBaseObject_Type": "space.w_object", + 'PyNone_Type': 'space.type(space.w_None)', + 'PyNotImplemented_Type': 'space.type(space.w_NotImplemented)', + 'PyCell_Type': 'space.gettypeobject(Cell.typedef)', + 'PyModule_Type': 'space.gettypeobject(Module.typedef)', + 'PyProperty_Type': 'space.gettypeobject(W_Property.typedef)', + 'PySlice_Type': 'space.gettypeobject(W_SliceObject.typedef)', + 'PyClass_Type': 'space.gettypeobject(W_ClassObject.typedef)', + 'PyStaticMethod_Type': 'space.gettypeobject(StaticMethod.typedef)', + 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', + 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' }.items(): - GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) + GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) - for cpyname in 'Method List Long Dict Tuple Class'.split(): - FORWARD_DECLS.append('typedef struct { PyObject_HEAD } ' - 'Py%sObject' % (cpyname, )) + for cpyname in '''PyMethodObject PyListObject PyLongObject + PyDictObject PyTupleObject PyClassObject'''.split(): + FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' + % (cpyname, )) build_exported_objects() def get_structtype_for_ctype(ctype): From noreply at buildbot.pypy.org Sun Mar 9 08:29:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 08:29:15 +0100 (CET) Subject: [pypy-commit] pypy default: Add the test from Johan, skipped for now. 
Message-ID: <20140309072915.D2E901C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69813:a14a577815fa Date: 2014-03-09 08:28 +0100 http://bitbucket.org/pypy/pypy/changeset/a14a577815fa/ Log: Add the test from Johan, skipped for now. diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/foo3.c @@ -0,0 +1,73 @@ +#include +#include + +PyObject* foo3type_tp_new(PyTypeObject* metatype, PyObject* args, PyObject* kwds) +{ + printf("in foo3type_tp_new, preprocessing...\n"); + PyObject* newType = PyType_Type.tp_new(metatype, args, kwds); + printf("in foo3type_tp_new, postprocessing...\n"); + return newType; +} + +PyTypeObject Foo3Type_Type = { + PyVarObject_HEAD_INIT(0, 0) + /*tp_name*/ "Foo3.Type", + /*tp_basicsize*/ sizeof(PyTypeObject), + /*tp_itemsize*/ 0, + /*tp_dealloc*/ 0, + /*tp_print*/ 0, + /*tp_getattr*/ 0, + /*tp_setattr*/ 0, + /*tp_compare*/ 0, + /*tp_repr*/ 0, + /*tp_as_number*/ 0, + /*tp_as_sequence*/ 0, + /*tp_as_mapping*/ 0, + /*tp_hash*/ 0, + /*tp_call*/ 0, + /*tp_str*/ 0, + /*tp_getattro*/ 0, + /*tp_setattro*/ 0, + /*tp_as_buffer*/ 0, + /*tp_flags*/ Py_TPFLAGS_DEFAULT, + /*tp_doc*/ 0, + /*tp_traverse*/ 0, + /*tp_clear*/ 0, + /*tp_richcompare*/ 0, + /*tp_weaklistoffset*/ 0, + /*tp_iter*/ 0, + /*tp_iternext*/ 0, + /*tp_methods*/ 0, + /*tp_members*/ 0, + /*tp_getset*/ 0, + /*tp_base*/ 0, // set to &PyType_Type in module init function (why can it not be done here?) + /*tp_dict*/ 0, + /*tp_descr_get*/ 0, + /*tp_descr_set*/ 0, + /*tp_dictoffset*/ 0, + /*tp_init*/ 0, + /*tp_alloc*/ 0, + /*tp_new*/ foo3type_tp_new, + /*tp_free*/ 0, + /*tp_is_gc*/ 0, + /*tp_bases*/ 0, + /*tp_mro*/ 0, + /*tp_cache*/ 0, + /*tp_subclasses*/ 0, + /*tp_weaklist*/ 0 +}; + +static PyMethodDef sbkMethods[] = {{NULL, NULL, 0, NULL}}; + +#ifdef _WIN32 + __declspec(dllexport) void // PyModINIT_FUNC is broken on PyPy/Windows +#else + PyMODINIT_FUNC +#endif +initfoo3(void) +{ + PyObject* mod = Py_InitModule("Foo3", sbkMethods); + Foo3Type_Type.tp_base = &PyType_Type; + PyType_Ready(&Foo3Type_Type); + PyModule_AddObject(mod, "Type", (PyObject*)&Foo3Type_Type); +} diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -588,3 +588,9 @@ assert bool(module.newInt(1)) assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + + def test_tp_new_in_subclass_of_type(self): + py.test.skip("BROKEN") + module = self.import_module(name='foo3') + print 'calling module.Type()...' 
+ module.Type("X", (object,), {}) From noreply at buildbot.pypy.org Sun Mar 9 08:29:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 08:29:17 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140309072917.415331C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69814:3c269e621202 Date: 2014-03-09 08:28 +0100 http://bitbucket.org/pypy/pypy/changeset/3c269e621202/ Log: merge heads diff --git a/lib-python/2.7/test/test_zipfile.py b/lib-python/2.7/test/test_zipfile.py --- a/lib-python/2.7/test/test_zipfile.py +++ b/lib-python/2.7/test/test_zipfile.py @@ -19,7 +19,7 @@ from unittest import skipUnless from test.test_support import TESTFN, TESTFN_UNICODE, TESTFN_ENCODING, \ - run_unittest, findfile, unlink + run_unittest, findfile, unlink, rmtree try: TESTFN_UNICODE.encode(TESTFN_ENCODING) except (UnicodeError, TypeError): @@ -365,7 +365,8 @@ produces the expected result.""" with zipfile.ZipFile(TESTFN2, "w") as zipfp: zipfp.write(TESTFN) - self.assertEqual(zipfp.read(TESTFN), open(TESTFN).read()) + with open(TESTFN,'r') as fid: + self.assertEqual(zipfp.read(TESTFN), fid.read()) @skipUnless(zlib, "requires zlib") def test_per_file_compression(self): @@ -404,11 +405,12 @@ self.assertEqual(writtenfile, correctfile) # make sure correct data is in correct file - self.assertEqual(fdata, open(writtenfile, "rb").read()) + with open(writtenfile, "rb") as fid: + self.assertEqual(fdata, fid.read()) os.remove(writtenfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def test_extract_all(self): with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp: @@ -419,12 +421,13 @@ zipfp.extractall() for fpath, fdata in SMALL_TEST_DATA: outfile = os.path.join(os.getcwd(), fpath) - - self.assertEqual(fdata, open(outfile, "rb").read()) + + with open(outfile, "rb") as fid: + self.assertEqual(fdata, fid.read()) os.remove(outfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def check_file(self, filename, content): self.assertTrue(os.path.isfile(filename)) @@ -509,12 +512,12 @@ self.assertEqual(writtenfile, correctfile, msg="extract %r" % arcname) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') with zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall(targetpath) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') correctfile = os.path.join(os.getcwd(), *fixedname.split('/')) @@ -523,12 +526,12 @@ self.assertEqual(writtenfile, correctfile, msg="extract %r" % arcname) self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) with zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall() self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) os.remove(TESTFN2) @@ -593,6 +596,8 @@ def tearDown(self): unlink(TESTFN) unlink(TESTFN2) + if os.path.exists(TESTFN): + os.remove(TESTFN) class TestZip64InSmallFiles(unittest.TestCase): @@ -712,6 +717,12 @@ class PyZipFileTests(unittest.TestCase): + def teardown(self): + if os.path.exists(TESTFN): + os.remove(TESTFN) + if os.path.exists(TESTFN2): + os.remove(TESTFN2) + def test_write_pyfile(self): with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp: fn = __file__ @@ -773,11 +784,14 @@ self.assertNotIn('mod2.txt', names) 
finally: - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) def test_write_non_pyfile(self): + if os.path.exists(TESTFN): + os.remove(TESTFN) with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp: - open(TESTFN, 'w').write('most definitely not a python file') + with open(TESTFN, 'w') as fid: + fid.write('most definitely not a python file') self.assertRaises(RuntimeError, zipfp.writepy, TESTFN) os.remove(TESTFN) @@ -940,8 +954,9 @@ self.assertRaises(RuntimeError, zipf.open, "foo.txt") self.assertRaises(RuntimeError, zipf.testzip) self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus") - open(TESTFN, 'w').write('zipfile test data') - self.assertRaises(RuntimeError, zipf.write, TESTFN) + with open(TESTFN, 'w') as fid: + fid.write('zipfile test data') + self.assertRaises(RuntimeError, zipf.write, TESTFN) def test_bad_constructor_mode(self): """Check that bad modes passed to ZipFile constructor are caught.""" @@ -1126,6 +1141,7 @@ pass try: zipf = zipfile.ZipFile(TESTFN, mode="r") + zipf.close() except zipfile.BadZipfile: self.fail("Unable to create empty ZIP file in 'w' mode") @@ -1133,6 +1149,7 @@ pass try: zipf = zipfile.ZipFile(TESTFN, mode="r") + zipf.close() except: self.fail("Unable to create empty ZIP file in 'a' mode") @@ -1151,6 +1168,8 @@ def tearDown(self): unlink(TESTFN) unlink(TESTFN2) + if os.path.exists(TESTFN): + os.remove(TESTFN) class DecryptionTests(unittest.TestCase): @@ -1201,16 +1220,28 @@ def test_bad_password(self): self.zip.setpassword("perl") - self.assertRaises(RuntimeError, self.zip.read, "test.txt") + try: + self.assertRaises(RuntimeError, self.zip.read, "test.txt") + finally: + self.zip.close() self.zip2.setpassword("perl") - self.assertRaises(RuntimeError, self.zip2.read, "zero") + try: + self.assertRaises(RuntimeError, self.zip2.read, "zero") + finally: + self.zip2.close() @skipUnless(zlib, "requires zlib") def test_good_password(self): self.zip.setpassword("python") - self.assertEqual(self.zip.read("test.txt"), self.plain) + try: + self.assertEqual(self.zip.read("test.txt"), self.plain) + finally: + self.zip.close() self.zip2.setpassword("12345") - self.assertEqual(self.zip2.read("zero"), self.plain2) + try: + self.assertEqual(self.zip2.read("zero"), self.plain2) + finally: + self.zip2.close() class TestsWithRandomBinaryFiles(unittest.TestCase): @@ -1224,8 +1255,10 @@ fp.write(self.data) def tearDown(self): - unlink(TESTFN) - unlink(TESTFN2) + if os.path.exists(TESTFN): + os.remove(TESTFN) + if os.path.exists(TESTFN2): + os.remove(TESTFN2) def make_test_archive(self, f, compression): # Create the ZIP archive @@ -1329,12 +1362,11 @@ # Verify that (when the ZipFile is in control of creating file objects) # multiple open() calls can be made without interfering with each other. 
with zipfile.ZipFile(TESTFN2, mode="r") as zipf: - zopen1 = zipf.open('ones') - zopen2 = zipf.open('ones') - data1 = zopen1.read(500) - data2 = zopen2.read(500) - data1 += zopen1.read(500) - data2 += zopen2.read(500) + with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2: + data1 = zopen1.read(500) + data2 = zopen2.read(500) + data1 += zopen1.read(500) + data2 += zopen2.read(500) self.assertEqual(data1, data2) def test_different_file(self): @@ -1394,14 +1426,14 @@ def test_store_dir(self): os.mkdir(os.path.join(TESTFN2, "x")) - zipf = zipfile.ZipFile(TESTFN, "w") - zipf.write(os.path.join(TESTFN2, "x"), "x") - self.assertTrue(zipf.filelist[0].filename.endswith("x/")) + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.write(os.path.join(TESTFN2, "x"), "x") + self.assertTrue(zipf.filelist[0].filename.endswith("x/")) def tearDown(self): - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) if os.path.exists(TESTFN): - unlink(TESTFN) + os.remove(TESTFN) class UniversalNewlineTests(unittest.TestCase): @@ -1413,7 +1445,8 @@ for n, s in enumerate(self.seps): self.arcdata[s] = s.join(self.line_gen) + s self.arcfiles[s] = '%s-%d' % (TESTFN, n) - open(self.arcfiles[s], "wb").write(self.arcdata[s]) + with open(self.arcfiles[s], "wb") as fid: + fid.write(self.arcdata[s]) def make_test_archive(self, f, compression): # Create the ZIP archive @@ -1482,8 +1515,9 @@ # Read the ZIP archive with zipfile.ZipFile(f, "r") as zipfp: for sep, fn in self.arcfiles.items(): - for line, zipline in zip(self.line_gen, zipfp.open(fn, "rU")): - self.assertEqual(zipline, line + '\n') + with zipfp.open(fn, "rU") as fid: + for line, zipline in zip(self.line_gen, fid): + self.assertEqual(zipline, line + '\n') def test_read_stored(self): for f in (TESTFN2, TemporaryFile(), StringIO()): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,17 +328,16 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisvalue = args.pop(0) + thisvalue = args[0] thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, args, kwargs)) - args.insert(0, thisvalue) + self._convert_args(argtypes, args[1:], kwargs)) newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, args, kwargs)) + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -36,11 +36,13 @@ return self._value lib_m = 'm' +has_sinf = True if sys.platform == 'win32': #there is a small chance this fails on Mingw via environ $CC import distutils.ccompiler if distutils.ccompiler.get_default_compiler() == 'msvc': lib_m = 'msvcrt' + has_sinf = False class TestFunction(object): Backend = CTypesBackend @@ -55,6 +57,8 @@ assert x == math.sin(1.23) def test_sinf(self): + if not has_sinf: + py.test.skip("sinf not available") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ 
b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -236,8 +236,14 @@ return 42 con.set_authorizer(authorizer_cb) with pytest.raises(_sqlite3.OperationalError) as e: - con.execute('select 42') - assert str(e.value) == 'authorizer malfunction' + con.execute('select 123') + major, minor, micro = _sqlite3.sqlite_version.split('.')[:3] + if (int(major), int(minor), int(micro)) >= (3, 6, 14): + assert str(e.value) == 'authorizer malfunction' + else: + assert str(e.value) == \ + ("illegal return value (1) from the authorization function - " + "should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY") def test_issue1573(con): diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -1148,6 +1148,9 @@ address_to_fill=None): # port_or_service is a string, not an int (but try str(port_number)). assert port_or_service is None or isinstance(port_or_service, str) + if _c._MACOSX: + if port_or_service is None or port_or_service == '0': + port_or_service = '00' hints = lltype.malloc(_c.addrinfo, flavor='raw', zero=True) rffi.setintfield(hints, 'c_ai_family', family) rffi.setintfield(hints, 'c_ai_socktype', socktype) diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -328,6 +328,11 @@ found = True assert found, lst +def test_getaddrinfo_osx_crash(): + # see CPython issue17269 + for port in [None, '0', '00']: + getaddrinfo('localhost', port, 0, 0, 0, AI_NUMERICSERV) + def test_connect_ex(): s = RSocket() err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work From noreply at buildbot.pypy.org Sun Mar 9 08:35:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 08:35:31 +0100 (CET) Subject: [pypy-commit] pypy default: oups Message-ID: <20140309073531.563961C0686@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69815:99278bb89bd8 Date: 2014-03-09 08:34 +0100 http://bitbucket.org/pypy/pypy/changeset/99278bb89bd8/ Log: oups diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -590,7 +590,7 @@ raises(ValueError, bool, module.newInt(-42)) def test_tp_new_in_subclass_of_type(self): - py.test.skip("BROKEN") + skip("BROKEN") module = self.import_module(name='foo3') print 'calling module.Type()...' 
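A side note on the rsocket getaddrinfo() fix a little further up, before the fast-gil diffs start below: the OS X quirk it works around (CPython issue17269) can be sidestepped in much the same way from application-level Python. The sketch below is illustrative only -- lookup_service() is a made-up helper, not something in the tree, and it assumes CPython's socket module exposes AI_NUMERICSERV on the platform:

    import sys
    import socket

    def lookup_service(host, port):
        # Hypothetical helper mirroring the rsocket workaround above: on
        # OS X, getaddrinfo() used to crash when the service is None or
        # '0' together with AI_NUMERICSERV (CPython issue17269), so map
        # such values to '00' before calling it.
        if sys.platform == 'darwin' and port in (None, '0'):
            port = '00'
        return socket.getaddrinfo(host, port, 0, 0, 0,
                                  socket.AI_NUMERICSERV)

    # e.g. lookup_service('localhost', None) should not crash on OS X
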
module.Type("X", (object,), {}) From noreply at buildbot.pypy.org Sun Mar 9 10:54:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 10:54:30 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: in-progress Message-ID: <20140309095430.394E81C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69816:e319f447d2a2 Date: 2014-03-09 10:53 +0100 http://bitbucket.org/pypy/pypy/changeset/e319f447d2a2/ Log: in-progress diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -306,28 +306,28 @@ @staticmethod @rgc.no_collect - def _release_gil_asmgcc(css): - # similar to trackgcroot.py:pypy_asm_stackwalk, first part - from rpython.memory.gctransform import asmgcroot - new = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) - next = asmgcroot.gcrootanchor.next - new.next = next - new.prev = asmgcroot.gcrootanchor - asmgcroot.gcrootanchor.next = new - next.prev = new - # and now release the GIL - before = rffi.aroundstate.before - if before: - before() - - @staticmethod - @rgc.no_collect - def _reacquire_gil_asmgcc(css): - # first reacquire the GIL - after = rffi.aroundstate.after - if after: - after() - # similar to trackgcroot.py:pypy_asm_stackwalk, second part + def _reacquire_gil_asmgcc(css, old_rpy_fastgil): + # Only called if rpy_fastgil was reset to a different value + # by another thread or by a callback. See description in + # transator/c/src/thread_pthread.c. + if not old_rpy_fastgil: + # first reacquire the GIL + after = rffi.aroundstate.after + if after: + after() + else: + # stole the GIL from a different thread that is also + # currently in an external call from the jit. Attach + # the 'old_rpy_fastgil' into the chained list. + from rpython.memory.gctransform import asmgcroot + oth = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, old_rpy_fastgil) + next = asmgcroot.gcrootanchor.next + oth.next = next + oth.prev = asmgcroot.gcrootanchor + asmgcroot.gcrootanchor.next = oth + next.prev = oth + # similar to trackgcroot.py:pypy_asm_stackwalk, second part: + # detach the 'css' from the chained list from rpython.memory.gctransform import asmgcroot old = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) prev = old.prev @@ -337,42 +337,29 @@ @staticmethod @rgc.no_collect - def _release_gil_shadowstack(): - before = rffi.aroundstate.before - if before: - before() - - @staticmethod - @rgc.no_collect def _reacquire_gil_shadowstack(): + # Simplified version of _reacquire_gil_asmgcc(): in shadowstack mode, + # rpy_fastgil contains only 0 or 1, and this must only be called when + # the old value stored in rpy_fastgil was 0. 
after = rffi.aroundstate.after if after: after() - @staticmethod - def _no_op(): - pass - - _NOARG_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) - _CLOSESTACK_FUNC = lltype.Ptr(lltype.FuncType([rffi.LONGP], - lltype.Void)) + _REACQGIL0_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) + _REACQGIL2_FUNC = lltype.Ptr(lltype.FuncType([rffi.CCHARP, rffi.CCHARP], + lltype.Void)) def _build_release_gil(self, gcrootmap): if gcrootmap is None: - releasegil_func = llhelper(self._NOARG_FUNC, self._no_op) - reacqgil_func = llhelper(self._NOARG_FUNC, self._no_op) + pass elif gcrootmap.is_shadow_stack: - releasegil_func = llhelper(self._NOARG_FUNC, - self._release_gil_shadowstack) - reacqgil_func = llhelper(self._NOARG_FUNC, + reacqgil_func = llhelper(self._REACQGIL0_FUNC, self._reacquire_gil_shadowstack) + self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) else: - releasegil_func = llhelper(self._CLOSESTACK_FUNC, - self._release_gil_asmgcc) - reacqgil_func = llhelper(self._CLOSESTACK_FUNC, + reacqgil_func = llhelper(self._REACQGIL2_FUNC, self._reacquire_gil_asmgcc) - self.releasegil_addr = self.cpu.cast_ptr_to_int(releasegil_func) - self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) + self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) def _is_asmgcc(self): gcrootmap = self.cpu.gc_ll_descr.gcrootmap diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py --- a/rpython/jit/backend/llsupport/callbuilder.py +++ b/rpython/jit/backend/llsupport/callbuilder.py @@ -1,4 +1,6 @@ from rpython.rlib.clibffi import FFI_DEFAULT_ABI +from rpython.rlib import objectmodel + class AbstractCallBuilder(object): @@ -42,20 +44,22 @@ def emit_call_release_gil(self): """Emit a CALL_RELEASE_GIL, including calls to releasegil_addr and reacqgil_addr.""" + asmgcc = self.asm._is_asmgcc() + fastgil = objectmodel.prepare_enter_callback_from_jit(is_asmgcc) self.select_call_release_gil_mode() self.prepare_arguments() self.push_gcmap_for_call_release_gil() - self.call_releasegil_addr_and_move_real_arguments() + self.call_releasegil_addr_and_move_real_arguments(fastgil) self.emit_raw_call() self.restore_stack_pointer() - self.move_real_result_and_call_reacqgil_addr() + self.move_real_result_and_call_reacqgil_addr(fastgil) self.pop_gcmap() self.load_result() - def call_releasegil_addr_and_move_real_arguments(self): + def call_releasegil_addr_and_move_real_arguments(self, fastgil): raise NotImplementedError - def move_real_result_and_call_reacqgil_addr(self): + def move_real_result_and_call_reacqgil_addr(self, fastgil): raise NotImplementedError def select_call_release_gil_mode(self): diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -41,7 +41,6 @@ self.current_esp = 0 # 0 or (usually) negative, counted in bytes def select_call_release_gil_mode(self): - """Overridden in CallBuilder64""" AbstractCallBuilder.select_call_release_gil_mode(self) if self.asm._is_asmgcc(): from rpython.memory.gctransform import asmgcroot @@ -100,13 +99,13 @@ self.asm.set_extra_stack_depth(self.mc, 0) self.asm.pop_gcmap(self.mc) - def call_releasegil_addr_and_move_real_arguments(self): - initial_esp = self.current_esp - self.save_register_arguments() + def call_releasegil_addr_and_move_real_arguments(self, fastgil): + from rpython.jit.backend.x86.assembler import heap # if not self.asm._is_asmgcc(): # the helper takes no argument 
self.change_extra_stack_depth = False + css_value = imm(1) else: from rpython.memory.gctransform import asmgcroot # build a 'css' structure on the stack: 2 words for the linkage, @@ -120,59 +119,63 @@ index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) self.mc.MOV_sr(index_of_ebp, ebp.value) # MOV [css.ebp], EBP # Save the "return address": we pretend that it's css - if IS_X86_32: - reg = eax - elif IS_X86_64: - reg = edi - self.mc.LEA_rs(reg.value, css) # LEA reg, [css] + self.mc.LEA_rs(eax.value, css) # LEA eax, [css] frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR) - self.mc.MOV_sr(frame_ptr, reg.value) # MOV [css.frame], reg + self.mc.MOV_sr(frame_ptr, eax.value) # MOV [css.frame], eax # Set up jf_extra_stack_depth to pretend that the return address # was at css, and so our stack frame is supposedly shorter by # (PASS_ON_MY_FRAME-JIT_USE_WORDS+1) words delta = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS + 1 self.change_extra_stack_depth = True self.asm.set_extra_stack_depth(self.mc, -delta * WORD) - # Call the closestack() function (also releasing the GIL) - # with 'reg' as argument - if IS_X86_32: - self.subtract_esp_aligned(1) - self.mc.MOV_sr(0, reg.value) - #else: - # on x86_64, reg is edi so that it is already correct + css_value = eax # - self.mc.CALL(imm(self.asm.releasegil_addr)) + self.mc.MOV(heap(fastgil), css_value) # if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more + + def move_real_result_and_call_reacqgil_addr(self, fastgil): + from rpython.jit.backend.x86.assembler import heap + from rpython.jit.backend.x86 import rx86 # - self.restore_register_arguments() - self.restore_stack_pointer(initial_esp) - - def save_register_arguments(self): - """Overridden in CallBuilder64""" - - def restore_register_arguments(self): - """Overridden in CallBuilder64""" - - def move_real_result_and_call_reacqgil_addr(self): # save the result we just got (in eax/eax+edx/st(0)/xmm0) self.save_result_value() # call the reopenstack() function (also reacquiring the GIL) + mc = self.mc if not self.asm._is_asmgcc(): - css = 0 # the helper takes no argument + css_value = imm(1) + old_value = edx else: from rpython.memory.gctransform import asmgcroot css = WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS) if IS_X86_32: - reg = eax + css_value = ecx + old_value = edx elif IS_X86_64: - reg = edi - self.mc.LEA_rs(reg.value, css) - if IS_X86_32: - self.mc.MOV_sr(0, reg.value) + css_value = edi + old_value = esi + mc.LEA_rs(css_value.value, css) # - self.mc.CALL(imm(self.asm.reacqgil_addr)) + mc.XOR_rr(old_value.value, old_value.value) + if rx86.fits_in_32bits(fastgil): + mc.XCHG_rj(old_value.value, fastgil) + else: + mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) + mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) + mc.CMP(old_value, css_value) + mc.J_il8(rx86.Conditions['E'], 0) + je_location = mc.get_relative_pos() + # + if IS_X86_32: + mc.MOV_sr(4, css_value.value) + mc.MOV_sr(0, old_value.value) + mc.CALL(imm(self.asm.reacqgil_addr)) + # + # patch the JE above + offset = mc.get_relative_pos() - je_location + assert 0 < offset <= 127 + mc.overwrite(je_location-1, chr(offset)) # if not we_are_translated(): # for testing: now we can accesss self.mc.SUB(ebp, imm(1)) # ebp again @@ -264,33 +267,32 @@ CallBuilderX86.load_result(self) def save_result_value(self): - # Temporarily save the result value into [ESP+4]. We use "+4" - # in order to leave the word at [ESP+0] free, in case it's needed + # Temporarily save the result value into [ESP+8]. 
We use "+8" + # in order to leave the two initial words free, in case it's needed if self.ressize == 0: # void return return if self.resloc.is_float(): # a float or a long long return - self.tmpresloc = RawEspLoc(4, FLOAT) + self.tmpresloc = RawEspLoc(8, FLOAT) if self.restype == 'L': - self.mc.MOV_sr(4, eax.value) # long long - self.mc.MOV_sr(8, edx.value) + self.mc.MOV_sr(8, eax.value) # long long + self.mc.MOV_sr(12, edx.value) else: - self.mc.FSTPL_s(4) # float return + self.mc.FSTPL_s(8) # float return else: - self.tmpresloc = RawEspLoc(4, INT) + self.tmpresloc = RawEspLoc(8, INT) if self.restype == 'S': - self.mc.FSTPS_s(4) + self.mc.FSTPS_s(8) else: assert self.restype == INT assert self.ressize <= WORD - self.mc.MOV_sr(4, eax.value) + self.mc.MOV_sr(8, eax.value) class CallBuilder64(CallBuilderX86): ARGUMENTS_GPR = [edi, esi, edx, ecx, r8, r9] ARGUMENTS_XMM = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7] - DONT_MOVE_GPR = [] _ALL_CALLEE_SAVE_GPR = [ebx, r12, r13, r14, r15] next_arg_gpr = 0 @@ -303,13 +305,6 @@ res = self.ARGUMENTS_GPR[i] except IndexError: return None - if hint in self.DONT_MOVE_GPR: - for j in range(i): - if hint is self.ARGUMENTS_GPR[j]: - break - else: - self.ARGUMENTS_GPR[i] = hint - res = hint return res def _unused_xmm(self): @@ -320,51 +315,6 @@ except IndexError: return None - def _permute_to_prefer_unused_registers(self, lst): - # permute 'lst' so that it starts with registers that are not - # in 'self.already_used', and ends with registers that are. - N = len(lst) - i = 0 - while i < N: - reg = lst[i] - if reg in self.already_used: - # move this reg to the end, and decrement N - N -= 1 - assert N >= i - lst[N], lst[i] = lst[i], lst[N] - else: - i += 1 - - def select_call_release_gil_mode(self): - CallBuilderX86.select_call_release_gil_mode(self) - # We have to copy the arguments around a bit more in this mode, - # but on the other hand we don't need prepare_arguments() moving - # them in precisely the final registers. Here we look around for - # unused registers that may be more likely usable. - from rpython.jit.backend.x86.regalloc import X86_64_RegisterManager - from rpython.jit.backend.x86.regalloc import X86_64_XMMRegisterManager - self.already_used = {} - for loc in self.arglocs: - self.already_used[loc] = None - # - lst = X86_64_RegisterManager.save_around_call_regs[:] - self._permute_to_prefer_unused_registers(lst) - # - extra = [] - for reg in self.asm._regalloc.rm.free_regs: - if (reg not in self.already_used and - reg in self._ALL_CALLEE_SAVE_GPR): - extra.append(reg) - self.free_callee_save_gprs = extra - lst = extra + lst - # - self.ARGUMENTS_GPR = lst[:len(self.ARGUMENTS_GPR)] - self.DONT_MOVE_GPR = self._ALL_CALLEE_SAVE_GPR - # - lst = X86_64_XMMRegisterManager.save_around_call_regs[:] - self._permute_to_prefer_unused_registers(lst) - self.ARGUMENTS_XMM = lst[:len(self.ARGUMENTS_XMM)] - def prepare_arguments(self): src_locs = [] dst_locs = [] @@ -474,49 +424,6 @@ assert self.restype == INT self.mc.MOV(self.tmpresloc, eax) - def save_register_arguments(self): - # Save the argument registers, which are given by self.ARGUMENTS_xxx. 
- n_gpr = min(self.next_arg_gpr, len(self.ARGUMENTS_GPR)) - n_xmm = min(self.next_arg_xmm, len(self.ARGUMENTS_XMM)) - n_saved_regs = n_gpr + n_xmm - for i in range(n_gpr): - if self.ARGUMENTS_GPR[i] in self._ALL_CALLEE_SAVE_GPR: - n_saved_regs -= 1 # don't need to save it - self.subtract_esp_aligned(n_saved_regs) - # - n = 0 - for i in range(n_gpr): - if self.ARGUMENTS_GPR[i] not in self._ALL_CALLEE_SAVE_GPR: - self.mc.MOV_sr(n * WORD, self.ARGUMENTS_GPR[i].value) - n += 1 - for i in range(n_xmm): - self.mc.MOVSD_sx(n * WORD, self.ARGUMENTS_XMM[i].value) - n += 1 - assert n == n_saved_regs - self.n_saved_regs = n_saved_regs - - def restore_register_arguments(self): - # Restore the saved values into the *real* registers used for calls - # --- which are not self.ARGUMENTS_xxx! - n_gpr = min(self.next_arg_gpr, len(self.ARGUMENTS_GPR)) - n_xmm = min(self.next_arg_xmm, len(self.ARGUMENTS_XMM)) - # - n = 0 - for i in range(n_gpr): - tgtvalue = CallBuilder64.ARGUMENTS_GPR[i].value - if self.ARGUMENTS_GPR[i] not in self._ALL_CALLEE_SAVE_GPR: - self.mc.MOV_rs(tgtvalue, n * WORD) - n += 1 - else: - self.mc.MOV_rr(tgtvalue, self.ARGUMENTS_GPR[i].value) - for i in range(n_xmm): - self.mc.MOVSD_xs(CallBuilder64.ARGUMENTS_XMM[i].value, n * WORD) - n += 1 - assert n == self.n_saved_regs - # - if isinstance(self.fnloc, RegLoc): # fix this register - self.fnloc = CallBuilder64.ARGUMENTS_GPR[n_gpr - 1] - if IS_X86_32: CallBuilder = CallBuilder32 diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -558,7 +558,7 @@ # XXX: Only here for testing purposes..."as" happens the encode the # registers in the opposite order that we would otherwise do in a # register-register exchange. 
- #XCHG_rr = insn(rex_w, '\x87', register(1), register(2,8), '\xC0') + XCHG_rr = insn(rex_w, '\x87', register(1), register(2,8), '\xC0') JMP_l = insn('\xE9', relative(1)) JMP_r = insn(rex_nw, '\xFF', orbyte(4<<3), register(1), '\xC0') @@ -743,7 +743,7 @@ define_modrm_modes('SQRTSD_x*', ['\xF2', rex_nw, '\x0F\x51', register(1,8)], regtype='XMM') -#define_modrm_modes('XCHG_r*', [rex_w, '\x87', register(1, 8)]) +define_modrm_modes('XCHG_r*', [rex_w, '\x87', register(1, 8)]) define_modrm_modes('ADDSD_x*', ['\xF2', rex_nw, '\x0F\x58', register(1, 8)], regtype='XMM') define_modrm_modes('ADDPD_x*', ['\x66', rex_nw, '\x0F\x58', register(1, 8)], regtype='XMM') diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -589,6 +589,19 @@ llhelper(rffi.AroundFnPtr, before) llhelper(rffi.AroundFnPtr, after) +def _enter_callback_from_jit(): + from rpython.rlib import rthread + rthread.gil_enter_callback_without_gil() # no need for errno saving + +def prepare_enter_callback_from_jit(is_asmgcc): + from rpython.rlib import rthread + from rpython.rtyper.lltypesystem import rffi + if rffi.aroundstate.after is None: + rffi.aroundstate.after = _enter_callback_from_jit + from rpython.rtyper.annlowlevel import llhelper + llhelper(rffi.AroundFnPtr, _enter_callback_from_jit) + return rthread.get_fastgil_addr_raw(is_asmgcc) + def is_in_callback(): from rpython.rtyper.lltypesystem import rffi return rffi.stackcounter.stacks_counter > 1 diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -87,6 +87,23 @@ _nowrapper=True) gil_acquire = llexternal('RPyGilAcquire', [], lltype.Void, _nowrapper=True) +gil_enter_callback_without_gil = ( + llexternal('RPyEnterCallbackWithoutGil', [], lltype.Void, + _nowrapper=True)) + + at specialize.memo() +def _fetch_fastgil(rpy_fastgil_value): + eci = ExternalCompilationInfo( + pre_include_bits = ['#define RPY_FASTGIL %d' % rpy_fastgil_value]) + return rffi.llexternal('RPyFetchFastGil', [], lltype.Signed, + compilation_info=eci, sandboxsafe=True) + +def get_fastgil_addr_raw(is_asmgcc): + if is_asmgcc: # must be constant! + return _fetch_fastgil(42) + else: + return _fetch_fastgil(1) + def allocate_lock(): return Lock(allocate_ll_lock()) diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -552,6 +552,14 @@ if (pending_acquires >= 0) assert_has_the_gil(); #endif + /* Note that 'pending_acquires' is only manipulated when we hold the + GIL, with one exception: RPyGilAcquire() increases it by one + before it waits for the GIL mutex. Thus the only race condition + here should be harmless: the other thread already increased + 'pending_acquires' but is still not in the pthread_mutex_lock(). + That's fine. Note that we release the mutex in the + pthread_cond_wait() below. 
+ */ if (pending_acquires <= 0) return 0; atomic_add(&pending_acquires, 1L); @@ -575,24 +583,69 @@ ASSERT_STATUS(pthread_cond_signal(&cond_gil)); } -#ifdef RPY_FASTGIL_VARNAME +#ifdef RPY_FASTGIL #include +static void *rpy_fastgil = NULL; + +Signed RPyFetchFastGil(void) +{ + return (Signed)(&rpy_fastgil); +} + static inline void *atomic_xchg(void **ptr, void *value) { void *result; - asm volatile ( #if defined(__amd64__) - "xchgq %0, %1 /* automatically locked */" + asm volatile ("xchgq %0, %1 /* automatically locked */" + : "r"(result) : "0"(value), "m"(*ptr) : "memory"); #elif defined(__i386__) - "xchgl %0, %1 /* automatically locked */" + asm volatile ("xchgl %0, %1 /* automatically locked */" + : "r"(result) : "0"(value), "m"(*ptr) : "memory"); #else -# error "RPY_FASTGIL_VARNAME: only for x86 right now" + /* requires gcc >= 4.1 */ + while (1) { + result = *ptr; + if (__sync_bool_compare_and_swap(ptr, result, value)) + break; + } #endif - : "r"(result) : "0"(value), "m"(*ptr) : "memory"); return result; } +int RPyEnterCallbackWithoutGil(void) +{ + /* this function must be used when entering callbacks as long as + we don't have a real GIL. It only checks for a non-null value + in 'rpy_fastgil'. + + Note: doesn't use any pthread_xx function, so is errno-safe. + */ + void *fastgilvalue; + fastgilvalue = atomic_xchg(&rpy_fastgil, NULL); + if (fastgilvalue != NULL) { + /* yes, succeeded. We know that the other thread is before + the return to JITted assembler from the C function call. + The JITted assembler will definitely call RPyGilAcquire() + then. So we can just pretend that the GIL --- which is + still acquired --- is ours now. We only need to fix + the asmgcc linked list. + */ +#if RPY_FASTGIL == 42 /* special value to mean "asmgcc" */ + struct pypy_ASM_FRAMEDATA_HEAD0 *new = + (struct pypy_ASM_FRAMEDATA_HEAD0 *)fastgilvalue; + struct pypy_ASM_FRAMEDATA_HEAD0 *root = &pypy_g_ASM_FRAMEDATA_HEAD; + struct pypy_ASM_FRAMEDATA_HEAD0 *next = root->as_next; + new->as_next = next; + new->as_prev = root; + root->as_next = new; + next->as_prev = new; +#endif + return 1; + } + return 0; +} + static inline timespec_add(struct timespec *t, long incr) { long nsec = t->tv_nsec + incr; @@ -609,8 +662,7 @@ /* Support for the JIT, which generates calls to external C functions using the following very fast pattern: - * the global variable 'RPY_FASTGIL_VARNAME' (a macro naming the - real variable) contains normally 0 + * the global variable 'rpy_fastgil' contains normally 0 * before doing an external C call, the generated assembler sets this global variable to an in-stack pointer to its @@ -651,26 +703,8 @@ while (1) { /* try to see if we can steal the fast GIL */ - void *fastgilvalue; - fastgilvalue = atomic_xchg(&RPY_FASTGIL_VARNAME, NULL); - if (fastgilvalue != NULL) { - /* yes, succeeded. We know that the other thread is before - the return to JITted assembler from the C function call. - The JITted assembler will definitely call RPyGilAcquire() - then. So we can just pretend that the GIL --- which is - still acquired --- is ours now. We only need to fix - the asmgcc linked list. 
- */ - struct pypy_ASM_FRAMEDATA_HEAD0 *new = - (struct pypy_ASM_FRAMEDATA_HEAD0 *)fastgilvalue; - struct pypy_ASM_FRAMEDATA_HEAD0 *root = &pypy_g_ASM_FRAMEDATA_HEAD; - struct pypy_ASM_FRAMEDATA_HEAD0 *next = root->as_next; - new->as_next = next; - new->as_prev = root; - root->as_next = new; - next->as_prev = new; + if (RPyEnterCallbackWithoutGil()) return; - } /* sleep for a bit of time */ clock_gettime(CLOCK_REALTIME, &t); @@ -704,7 +738,7 @@ return; } atomic_add(&pending_acquires, 1L); -#ifdef RPY_FASTGIL_VARNAME +#ifdef RPY_FASTGIL _acquire_gil_or_wait_for_fastgil_to_be_nonzero(); #else ASSERT_STATUS(pthread_mutex_lock(&mutex_gil)); @@ -713,6 +747,3 @@ assert_has_the_gil(); _debug_print("RPyGilAcquire\n"); } - -XXX even without a gil, we need to check at least for a RPY_FASTGIL_VARNAME -that is not null, in callbacks From noreply at buildbot.pypy.org Sun Mar 9 10:58:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 10:58:11 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: typo Message-ID: <20140309095811.1CA0E1C0686@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69817:9b92b2968815 Date: 2014-03-09 10:57 +0100 http://bitbucket.org/pypy/pypy/changeset/9b92b2968815/ Log: typo diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py --- a/rpython/jit/backend/llsupport/callbuilder.py +++ b/rpython/jit/backend/llsupport/callbuilder.py @@ -44,7 +44,7 @@ def emit_call_release_gil(self): """Emit a CALL_RELEASE_GIL, including calls to releasegil_addr and reacqgil_addr.""" - asmgcc = self.asm._is_asmgcc() + is_asmgcc = self.asm._is_asmgcc() fastgil = objectmodel.prepare_enter_callback_from_jit(is_asmgcc) self.select_call_release_gil_mode() self.prepare_arguments() From noreply at buildbot.pypy.org Sun Mar 9 10:59:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 10:59:43 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Add an "XXX implement me" in the Windows section Message-ID: <20140309095943.665001C0686@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69818:e7c59506542b Date: 2014-03-09 10:59 +0100 http://bitbucket.org/pypy/pypy/changeset/e7c59506542b/ Log: Add an "XXX implement me" in the Windows section diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -243,3 +243,8 @@ EnterCriticalSection(&mutex_gil); InterlockedDecrement(&pending_acquires); } + +#ifdef RPY_FASTGIL +# error "XXX implement me" +InterlockedExchangePointer +#endif From noreply at buildbot.pypy.org Sun Mar 9 11:34:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 11:34:12 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Replace the aroundstate.after()/aroundstate.before() around callbacks with something more explicit: Message-ID: <20140309103412.0C7B91C1191@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69819:189d5700daf9 Date: 2014-03-09 11:33 +0100 http://bitbucket.org/pypy/pypy/changeset/189d5700daf9/ Log: Replace the aroundstate.after()/aroundstate.before() around callbacks with something more explicit: aroundstate.enter_callback()/aroundstate.leave_callback(). Allows us to do an annotator-friendly special case for entering callbacks in case we have a JIT but no threads (yet). 
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -121,25 +121,21 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): - after = rffi.aroundstate.after - if after: after() + rffi.aroundstate.enter_callback() source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) - before = rffi.aroundstate.before - if before: before() + rffi.aroundstate.leave_callback() return rffi.cast(rffi.INT, res) @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_execute_source_ptr') def pypy_execute_source_ptr(ll_source, ll_ptr): - after = rffi.aroundstate.after - if after: after() + rffi.aroundstate.enter_callback() source = rffi.charp2str(ll_source) space.setitem(w_globals, space.wrap('c_argument'), space.wrap(ll_ptr)) res = _pypy_execute_source(source) - before = rffi.aroundstate.before - if before: before() + rffi.aroundstate.leave_callback() return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') @@ -147,8 +143,7 @@ if not space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) - before = rffi.aroundstate.before - if before: before() + rffi.aroundstate.leave_callback() @entrypoint('main', [], c_name='pypy_thread_attach') def pypy_thread_attach(): @@ -159,8 +154,7 @@ rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() - before = rffi.aroundstate.before - if before: before() + rffi.aroundstate.leave_callback() w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -29,8 +29,7 @@ state = space.fromcache(InterpreterState) tstate = state.swap_thread_state( space, lltype.nullptr(PyThreadState.TO)) - if rffi.aroundstate.before: - rffi.aroundstate.before() + rffi.aroundstate.leave_callback() return tstate @cpython_api([PyThreadState], lltype.Void) @@ -40,8 +39,7 @@ NULL. If the lock has been created, the current thread must not have acquired it, otherwise deadlock ensues. (This function is available even when thread support is disabled at compile time.)""" - if rffi.aroundstate.after: - rffi.aroundstate.after() + rffi.aroundstate.enter_callback() state = space.fromcache(InterpreterState) state.swap_thread_state(space, tstate) @@ -188,9 +186,7 @@ tstate, which should not be NULL. The lock must have been created earlier. If this thread already has the lock, deadlock ensues. This function is not available when thread support is disabled at compile time.""" - if rffi.aroundstate.after: - # After external call is before entering Python - rffi.aroundstate.after() + rffi.aroundstate.enter_callback() @cpython_api([PyThreadState], lltype.Void) def PyEval_ReleaseThread(space, tstate): @@ -200,9 +196,7 @@ that it represents the current thread state --- if it isn't, a fatal error is reported. 
This function is not available when thread support is disabled at compile time.""" - if rffi.aroundstate.before: - # Before external call is after running Python - rffi.aroundstate.before() + rffi.aroundstate.leave_callback() PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', typedef='PyGILState_STATE', @@ -210,16 +204,12 @@ @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) def PyGILState_Ensure(space): - if rffi.aroundstate.after: - # After external call is before entering Python - rffi.aroundstate.after() + rffi.aroundstate.enter_callback() return 0 @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): - if rffi.aroundstate.before: - # Before external call is after running Python - rffi.aroundstate.before() + rffi.aroundstate.leave_callback() @cpython_api([], PyInterpreterState, error=CANNOT_FAIL) def PyInterpreterState_Head(space): @@ -243,12 +233,12 @@ raise NoThreads # PyThreadState_Get will allocate a new execution context, # we need to protect gc and other globals with the GIL. - rffi.aroundstate.after() + rffi.aroundstate.enter_callback() try: rthread.gc_thread_start() return PyThreadState_Get(space) finally: - rffi.aroundstate.before() + rffi.aroundstate.leave_callback() @cpython_api([PyThreadState], lltype.Void) def PyThreadState_Clear(space, tstate): diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py --- a/rpython/jit/backend/llsupport/callbuilder.py +++ b/rpython/jit/backend/llsupport/callbuilder.py @@ -1,5 +1,5 @@ from rpython.rlib.clibffi import FFI_DEFAULT_ABI -from rpython.rlib import objectmodel +from rpython.rlib import rthread class AbstractCallBuilder(object): @@ -45,7 +45,7 @@ """Emit a CALL_RELEASE_GIL, including calls to releasegil_addr and reacqgil_addr.""" is_asmgcc = self.asm._is_asmgcc() - fastgil = objectmodel.prepare_enter_callback_from_jit(is_asmgcc) + fastgil = rthread.get_fastgil_addr_raw(is_asmgcc) self.select_call_release_gil_mode() self.prepare_arguments() self.push_gcmap_for_call_release_gil() diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -583,25 +583,17 @@ from rpython.rtyper.lltypesystem import rffi rffi.aroundstate.before = before rffi.aroundstate.after = after + # For now, 'aroundstate.enter_callback' does something for the non-GIL + # case that is also done by the full 'after' callback. So we can simply + # replace it with 'after' when we get one. 
+ if after is not None: + rffi.aroundstate.enter_callback = after # the 'aroundstate' contains regular function and not ll pointers to them, # but let's call llhelper() anyway to force their annotation from rpython.rtyper.annlowlevel import llhelper llhelper(rffi.AroundFnPtr, before) llhelper(rffi.AroundFnPtr, after) -def _enter_callback_from_jit(): - from rpython.rlib import rthread - rthread.gil_enter_callback_without_gil() # no need for errno saving - -def prepare_enter_callback_from_jit(is_asmgcc): - from rpython.rlib import rthread - from rpython.rtyper.lltypesystem import rffi - if rffi.aroundstate.after is None: - rffi.aroundstate.after = _enter_callback_from_jit - from rpython.rtyper.annlowlevel import llhelper - llhelper(rffi.AroundFnPtr, _enter_callback_from_jit) - return rthread.get_fastgil_addr_raw(is_asmgcc) - def is_in_callback(): from rpython.rtyper.lltypesystem import rffi return rffi.stackcounter.stacks_counter > 1 diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -18,6 +18,11 @@ includes = ['src/thread.h'], separate_module_files = [translator_c_dir / 'src' / 'thread.c'], include_dirs = [translator_c_dir], + post_include_bits = [''' +#ifndef RPY_FASTGIL +# define RPyEnterCallbackWithoutGil() /* nothing */ +#endif +'''], export_symbols = ['RPyThreadGetIdent', 'RPyThreadLockInit', 'RPyThreadAcquireLock', 'RPyThreadAcquireLockTimed', 'RPyThreadReleaseLock', 'RPyGilAllocate', diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -280,9 +280,7 @@ source = py.code.Source(r""" def wrapper(%(args)s): # no *args - no GIL for mallocing the tuple if aroundstate is not None: - after = aroundstate.after - if after: - after() + aroundstate.enter_callback() # from now on we hold the GIL stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -298,12 +296,10 @@ result = errorcode stackcounter.stacks_counter -= 1 if aroundstate is not None: - before = aroundstate.before - if before: - before() + aroundstate.leave_callback() # here we don't hold the GIL any more. As in the wrapper() produced # by llexternal, it is essential that no exception checking occurs - # after the call to before(). + # after the call to leave_calback(). 
return result """ % locals()) miniglobals = locals().copy() @@ -315,12 +311,23 @@ return miniglobals['wrapper'] _make_wrapper_for._annspecialcase_ = 'specialize:memo' +def enter_callback_without_gil(): + if we_are_translated(): + from rpython.rlib import rthread + rthread.gil_enter_callback_without_gil() + AroundFnPtr = lltype.Ptr(lltype.FuncType([], lltype.Void)) class AroundState: def _cleanup_(self): self.before = None # or a regular RPython function self.after = None # or a regular RPython function + self.enter_callback = enter_callback_without_gil + def leave_callback(): + before = aroundstate.before # for now, it's the same + if before: before() + leave_callback._always_inline_ = True + leave_callback = staticmethod(leave_callback) aroundstate = AroundState() aroundstate._cleanup_() diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -724,7 +724,7 @@ } } } -#endif +#endif /* RPY_FASTGIL */ void RPyGilAcquire(void) { From noreply at buildbot.pypy.org Sun Mar 9 12:06:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 12:06:31 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Fixes and tweaks Message-ID: <20140309110631.386971C33CA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69820:d1696c4adffb Date: 2014-03-09 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/d1696c4adffb/ Log: Fixes and tweaks diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -25,9 +25,6 @@ # arguments, we need to decrease esp temporarily stack_max = PASS_ON_MY_FRAME - # set by save_result_value() - tmpresloc = None - def __init__(self, assembler, fnloc, arglocs, resloc=eax, restype=INT, ressize=WORD): AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs, @@ -68,12 +65,10 @@ if self.ressize == 0: return # void result # use the code in load_from_mem to do the zero- or sign-extension - srcloc = self.tmpresloc - if srcloc is None: - if self.restype == FLOAT: - srcloc = xmm0 - else: - srcloc = eax + if self.restype == FLOAT: + srcloc = xmm0 + else: + srcloc = eax if self.ressize >= WORD and self.resloc is srcloc: return # no need for any MOV if self.ressize == 1 and isinstance(srcloc, RegLoc): @@ -139,19 +134,25 @@ from rpython.jit.backend.x86.assembler import heap from rpython.jit.backend.x86 import rx86 # - # save the result we just got (in eax/eax+edx/st(0)/xmm0) - self.save_result_value() - # call the reopenstack() function (also reacquiring the GIL) + # check if we need to call the reopenstack() function + # (to acquiring the GIL, remove the asmgcc head from + # the chained list, etc.) 
mc = self.mc + restore_edx = False if not self.asm._is_asmgcc(): + css = 0 css_value = imm(1) - old_value = edx + old_value = ecx else: from rpython.memory.gctransform import asmgcroot css = WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS) if IS_X86_32: - css_value = ecx - old_value = edx + assert css >= 16 + if self.restype == 'L': # long long result: eax/edx + mc.MOV_sr(12, edx.value) + restore_edx = True + css_value = edx + old_value = ecx elif IS_X86_64: css_value = edi old_value = esi @@ -167,29 +168,39 @@ mc.J_il8(rx86.Conditions['E'], 0) je_location = mc.get_relative_pos() # + # Yes, we need to call the reopenstack() function + self.save_result_value_reacq() if IS_X86_32: mc.MOV_sr(4, css_value.value) mc.MOV_sr(0, old_value.value) mc.CALL(imm(self.asm.reacqgil_addr)) + self.restore_result_value_reacq() # # patch the JE above offset = mc.get_relative_pos() - je_location assert 0 < offset <= 127 mc.overwrite(je_location-1, chr(offset)) # - if not we_are_translated(): # for testing: now we can accesss - self.mc.SUB(ebp, imm(1)) # ebp again + if restore_edx: + mc.MOV_rs(edx.value, 12) # restore this + # + if not we_are_translated(): # for testing: now we can accesss + mc.SUB(ebp, imm(1)) # ebp again # # Now that we required the GIL, we can reload a possibly modified ebp if self.asm._is_asmgcc(): # special-case: reload ebp from the css from rpython.memory.gctransform import asmgcroot index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - self.mc.MOV_rs(ebp.value, index_of_ebp) # MOV EBP, [css.ebp] + mc.MOV_rs(ebp.value, index_of_ebp) # MOV EBP, [css.ebp] #else: # for shadowstack, done for us by _reload_frame_if_necessary() - def save_result_value(self): + def save_result_value_reacq(self): + """Overridden in CallBuilder32 and CallBuilder64""" + raise NotImplementedError + + def restore_result_value_reacq(self): """Overridden in CallBuilder32 and CallBuilder64""" raise NotImplementedError @@ -242,45 +253,40 @@ resloc = self.resloc if resloc is not None and resloc.is_float(): # a float or a long long return - if self.tmpresloc is None: - if self.restype == 'L': # long long - # move eax/edx -> xmm0 - self.mc.MOVD_xr(resloc.value^1, edx.value) - self.mc.MOVD_xr(resloc.value, eax.value) - self.mc.PUNPCKLDQ_xx(resloc.value, resloc.value^1) - else: - # float: we have to go via the stack - self.mc.FSTPL_s(0) - self.mc.MOVSD_xs(resloc.value, 0) + if self.restype == 'L': # long long + # move eax/edx -> xmm0 + self.mc.MOVD_xr(resloc.value^1, edx.value) + self.mc.MOVD_xr(resloc.value, eax.value) + self.mc.PUNPCKLDQ_xx(resloc.value, resloc.value^1) else: - self.mc.MOVSD(resloc, self.tmpresloc) + # float: we have to go via the stack + self.mc.FSTPL_s(0) + self.mc.MOVSD_xs(resloc.value, 0) # elif self.restype == 'S': # singlefloat return: must convert ST(0) to a 32-bit singlefloat # and load it into self.resloc. mess mess mess - if self.tmpresloc is None: - self.mc.FSTPS_s(0) - self.mc.MOV_rs(resloc.value, 0) - else: - self.mc.MOV(resloc, self.tmpresloc) + self.mc.FSTPS_s(0) + self.mc.MOV_rs(resloc.value, 0) else: CallBuilderX86.load_result(self) - def save_result_value(self): + def save_result_value_reacq(self): # Temporarily save the result value into [ESP+8]. We use "+8" - # in order to leave the two initial words free, in case it's needed + # in order to leave the two initial words free, in case it's needed. + # Also note that in this 32-bit case, a long long return value is + # in eax/edx, but we already saved the value of edx in + # move_real_result_and_call_reacqgil_addr(). 
if self.ressize == 0: # void return return if self.resloc.is_float(): # a float or a long long return - self.tmpresloc = RawEspLoc(8, FLOAT) if self.restype == 'L': self.mc.MOV_sr(8, eax.value) # long long - self.mc.MOV_sr(12, edx.value) + #self.mc.MOV_sr(12, edx.value) -- already done else: self.mc.FSTPL_s(8) # float return else: - self.tmpresloc = RawEspLoc(8, INT) if self.restype == 'S': self.mc.FSTPS_s(8) else: @@ -288,6 +294,25 @@ assert self.ressize <= WORD self.mc.MOV_sr(8, eax.value) + def restore_result_value_reacq(self): + # Opposite of save_result_value_reacq() + if self.ressize == 0: # void return + return + if self.resloc.is_float(): + # a float or a long long return + if self.restype == 'L': + self.mc.MOV_rs(eax.value, 8) # long long + #self.mc.MOV_rs(edx.value, 12) -- will be done for us + else: + self.mc.FLDL_s(8) # float return + else: + if self.restype == 'S': + self.mc.FLDS_s(8) + else: + assert self.restype == INT + assert self.ressize <= WORD + self.mc.MOV_rs(eax.value, 8) + class CallBuilder64(CallBuilderX86): @@ -394,35 +419,45 @@ assert 0 # should not occur on 64-bit def load_result(self): - if self.restype == 'S' and self.tmpresloc is None: + if self.restype == 'S': # singlefloat return: use MOVD to load the target register # from the lower 32 bits of XMM0 self.mc.MOVD(self.resloc, xmm0) else: CallBuilderX86.load_result(self) - def save_result_value(self): + def save_result_value_reacq(self): # Temporarily save the result value into [ESP]. if self.ressize == 0: # void return return # if self.restype == FLOAT: # and not 'S' self.mc.MOVSD_sx(0, xmm0.value) - self.tmpresloc = RawEspLoc(0, FLOAT) return # - if len(self.free_callee_save_gprs) == 0: - self.tmpresloc = RawEspLoc(0, INT) - else: - self.tmpresloc = self.free_callee_save_gprs[0] - # if self.restype == 'S': # singlefloat return: use MOVD to store the lower 32 bits - # of XMM0 into the tmpresloc (register or [ESP]) - self.mc.MOVD(self.tmpresloc, xmm0) + # of XMM0 into [ESP] (nb. this is actually MOVQ, so will + # store 64 bits instead of only 32, but that's fine) + self.mc.MOVD_sx(0, xmm0.value) else: assert self.restype == INT - self.mc.MOV(self.tmpresloc, eax) + self.mc.MOV_sr(0, eax.value) + + def restore_result_value_reacq(self): + # Opposite of save_result_value_reacq() + if self.ressize == 0: # void return + return + # + if self.restype == FLOAT: # and not 'S' + self.mc.MOVSD_xs(xmm0.value, 0) + return + # + if self.restype == 'S': + self.mc.MOVD_xs(xmm0.value, 0) + else: + assert self.restype == INT + self.mc.MOV_rs(eax.value, 0) if IS_X86_32: diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -586,6 +586,8 @@ FSTPL_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) # rffi.DOUBLE ('as' wants L??) FSTPL_s = insn('\xDD', orbyte(3<<3), stack_sp(1)) # rffi.DOUBLE ('as' wants L??) 
FSTPS_s = insn('\xD9', orbyte(3<<3), stack_sp(1)) # lltype.SingleFloat + FLDL_s = insn('\xDD', orbyte(0<<3), stack_sp(1)) + FLDS_s = insn('\xD9', orbyte(0<<3), stack_sp(1)) # ------------------------------ Random mess ----------------------- RDTSC = insn('\x0F\x31') @@ -620,6 +622,8 @@ MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0') MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0') MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2)) + MOVD_sx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), stack_sp(1)) + MOVD_xs = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_sp(2)) PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b')) diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -105,9 +105,9 @@ def get_fastgil_addr_raw(is_asmgcc): if is_asmgcc: # must be constant! - return _fetch_fastgil(42) + return _fetch_fastgil(42)() else: - return _fetch_fastgil(1) + return _fetch_fastgil(1)() def allocate_lock(): From noreply at buildbot.pypy.org Sun Mar 9 12:18:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 12:18:25 +0100 (CET) Subject: [pypy-commit] pypy default: Silence warning Message-ID: <20140309111825.F0E841C1191@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69821:4f9005ca99fc Date: 2014-03-09 12:17 +0100 http://bitbucket.org/pypy/pypy/changeset/4f9005ca99fc/ Log: Silence warning diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -188,7 +188,8 @@ # this function is written directly in C; gcc will optimize it using SSE eci = ExternalCompilationInfo(post_include_bits=[""" static void pypy__decay_jit_counters(char *data, double f1, long size) { - struct { float times[5]; unsigned short subhashes[5]; } *p = data; + struct rpy_jitcnt { float times[5]; unsigned short subhashes[5]; }; + struct rpy_jitcnt *p = (struct rpy_jitcnt *)data; float f = (float)f1; long i; for (i=0; i Author: Armin Rigo Branch: fast-gil Changeset: r69822:dda0292acbfc Date: 2014-03-09 12:19 +0100 http://bitbucket.org/pypy/pypy/changeset/dda0292acbfc/ Log: Fixes diff --git a/rpython/translator/c/src/thread.c b/rpython/translator/c/src/thread.c --- a/rpython/translator/c/src/thread.c +++ b/rpython/translator/c/src/thread.c @@ -1,14 +1,23 @@ /* Thread implementation */ #include "src/thread.h" -#ifdef PYPY_USING_BOEHM_GC /* The following include is required by the Boehm GC, which apparently * crashes when pthread_create_thread() is not redefined to call a * Boehm wrapper function instead. Ugly. + * + * It is also needed to see the definition of RPY_FASTGIL, if there is one. */ #include "common_header.h" + +/* More ugliness follows... 
*/ +#ifdef RPY_FASTGIL +# if RPY_FASTGIL == 42 /* special value to mean "asmgcc" */ +# include "structdef.h" +# include "forwarddecl.h" +# endif #endif + #ifdef _WIN32 #include "src/thread_nt.c" #else diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -598,10 +598,10 @@ void *result; #if defined(__amd64__) asm volatile ("xchgq %0, %1 /* automatically locked */" - : "r"(result) : "0"(value), "m"(*ptr) : "memory"); + : "=r"(result) : "0"(value), "m"(*ptr) : "memory"); #elif defined(__i386__) asm volatile ("xchgl %0, %1 /* automatically locked */" - : "r"(result) : "0"(value), "m"(*ptr) : "memory"); + : "=r"(result) : "0"(value), "m"(*ptr) : "memory"); #else /* requires gcc >= 4.1 */ while (1) { @@ -646,7 +646,7 @@ return 0; } -static inline timespec_add(struct timespec *t, long incr) +static inline void timespec_add(struct timespec *t, long incr) { long nsec = t->tv_nsec + incr; if (nsec >= 1000000000) { From noreply at buildbot.pypy.org Sun Mar 9 12:27:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 12:27:01 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: fixes Message-ID: <20140309112701.65F4D1C1191@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69823:7ee265600e15 Date: 2014-03-09 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/7ee265600e15/ Log: fixes diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -846,6 +846,10 @@ return [] def _visit_xchg(self, line): + # ignore the special locking xchg put there by custom assembler + # in thread_pthread.c, with an associated comment + if line.endswith('*/\n'): + return [] # only support the format used in VALGRIND_DISCARD_TRANSLATIONS # which is to use a marker no-op "xchgl %ebx, %ebx" match = self.r_binaryinsn.match(line) diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -597,10 +597,10 @@ { void *result; #if defined(__amd64__) - asm volatile ("xchgq %0, %1 /* automatically locked */" + asm volatile ("xchgq %0, %2 /* automatically locked */" : "=r"(result) : "0"(value), "m"(*ptr) : "memory"); #elif defined(__i386__) - asm volatile ("xchgl %0, %1 /* automatically locked */" + asm volatile ("xchgl %0, %2 /* automatically locked */" : "=r"(result) : "0"(value), "m"(*ptr) : "memory"); #else /* requires gcc >= 4.1 */ From noreply at buildbot.pypy.org Sun Mar 9 12:43:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 12:43:31 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Fixes for test_zrpy_release_gil Message-ID: <20140309114331.3F5C51C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69824:c42760c25a19 Date: 2014-03-09 12:42 +0100 http://bitbucket.org/pypy/pypy/changeset/c42760c25a19/ Log: Fixes for test_zrpy_release_gil diff --git a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py @@ -27,7 +27,7 @@ glob.event += 1 def before(n, x): - invoke_around_extcall(func, func) + invoke_around_extcall(func, None) return 
(n, None, None, None, None, None, None, None, None, None, None, None) # diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -591,8 +591,10 @@ # the 'aroundstate' contains regular function and not ll pointers to them, # but let's call llhelper() anyway to force their annotation from rpython.rtyper.annlowlevel import llhelper - llhelper(rffi.AroundFnPtr, before) - llhelper(rffi.AroundFnPtr, after) + if before is not None: + llhelper(rffi.AroundFnPtr, before) + if after is not None: + llhelper(rffi.AroundFnPtr, after) def is_in_callback(): from rpython.rtyper.lltypesystem import rffi From noreply at buildbot.pypy.org Sun Mar 9 13:13:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 13:13:33 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Small refactoring in rffi. The goal is to make zrpy_releasegil_test pass: previously, it was accidentally Message-ID: <20140309121333.684191D2825@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69826:329ece4367a6 Date: 2014-03-09 13:10 +0100 http://bitbucket.org/pypy/pypy/changeset/329ece4367a6/ Log: Small refactoring in rffi. The goal is to make zrpy_releasegil_test pass: previously, it was accidentally asking for a no-aroundstate version. diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -218,13 +218,8 @@ freeme = arg elif _isfunctype(TARGET) and not _isllptr(arg): # XXX pass additional arguments - if invoke_around_handlers: - arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, - callbackholder, - aroundstate)) - else: - arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, - callbackholder)) + arg = llhelper(TARGET, _make_wrapper_for( + TARGET, arg, invoke_around_handlers, callbackholder)) else: SOURCE = lltype.typeOf(arg) if SOURCE != TARGET: @@ -263,7 +258,8 @@ def __init__(self): self.callbacks = {} -def _make_wrapper_for(TP, callable, callbackholder=None, aroundstate=None): +def _make_wrapper_for(TP, callable, invoke_around_handlers=True, + callbackholder=None): """ Function creating wrappers for callbacks. Note that this is cheating as we assume constant callbacks and we just memoize wrappers """ @@ -279,7 +275,7 @@ args = ', '.join(['a%d' % i for i in range(len(TP.TO.ARGS))]) source = py.code.Source(r""" def wrapper(%(args)s): # no *args - no GIL for mallocing the tuple - if aroundstate is not None: + if invoke_around_handlers: aroundstate.enter_callback() # from now on we hold the GIL stackcounter.stacks_counter += 1 @@ -295,7 +291,7 @@ traceback.print_exc() result = errorcode stackcounter.stacks_counter -= 1 - if aroundstate is not None: + if invoke_around_handlers: aroundstate.leave_callback() # here we don't hold the GIL any more. As in the wrapper() produced # by llexternal, it is essential that no exception checking occurs @@ -307,6 +303,7 @@ miniglobals['os'] = os miniglobals['we_are_translated'] = we_are_translated miniglobals['stackcounter'] = stackcounter + miniglobals['aroundstate'] = aroundstate exec source.compile() in miniglobals return miniglobals['wrapper'] _make_wrapper_for._annspecialcase_ = 'specialize:memo' From noreply at buildbot.pypy.org Sun Mar 9 13:25:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 13:25:59 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Fix: seems that thread.c is also compiled early when running "rpython". 
Message-ID: <20140309122559.BE1291D2826@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69827:3462974498df Date: 2014-03-09 13:24 +0100 http://bitbucket.org/pypy/pypy/changeset/3462974498df/ Log: Fix: seems that thread.c is also compiled early when running "rpython". diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -99,7 +99,7 @@ @specialize.memo() def _fetch_fastgil(rpy_fastgil_value): eci = ExternalCompilationInfo( - pre_include_bits = ['#define RPY_FASTGIL %d' % rpy_fastgil_value]) + compile_extra = ['-DRPY_FASTGIL=%d' % rpy_fastgil_value]) return rffi.llexternal('RPyFetchFastGil', [], lltype.Signed, compilation_info=eci, sandboxsafe=True) diff --git a/rpython/translator/c/src/thread.c b/rpython/translator/c/src/thread.c --- a/rpython/translator/c/src/thread.c +++ b/rpython/translator/c/src/thread.c @@ -1,17 +1,19 @@ /* Thread implementation */ #include "src/thread.h" +#ifdef PYPY_USING_BOEHM_GC /* The following include is required by the Boehm GC, which apparently * crashes when pthread_create_thread() is not redefined to call a * Boehm wrapper function instead. Ugly. - * - * It is also needed to see the definition of RPY_FASTGIL, if there is one. */ #include "common_header.h" +#endif + /* More ugliness follows... */ #ifdef RPY_FASTGIL # if RPY_FASTGIL == 42 /* special value to mean "asmgcc" */ +# include "common_header.h" # include "structdef.h" # include "forwarddecl.h" # endif From noreply at buildbot.pypy.org Sun Mar 9 13:57:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 13:57:20 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Fix (32-bit, shadowstack, long long result not correctly saved/restored in case of threads) Message-ID: <20140309125720.491931C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69828:2473e1024a80 Date: 2014-03-09 13:41 +0100 http://bitbucket.org/pypy/pypy/changeset/2473e1024a80/ Log: Fix (32-bit, shadowstack, long long result not correctly saved/restored in case of threads) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -169,12 +169,12 @@ je_location = mc.get_relative_pos() # # Yes, we need to call the reopenstack() function - self.save_result_value_reacq() + self.save_result_value_reacq(restore_edx) if IS_X86_32: mc.MOV_sr(4, css_value.value) mc.MOV_sr(0, old_value.value) mc.CALL(imm(self.asm.reacqgil_addr)) - self.restore_result_value_reacq() + self.restore_result_value_reacq(restore_edx) # # patch the JE above offset = mc.get_relative_pos() - je_location @@ -196,11 +196,11 @@ #else: # for shadowstack, done for us by _reload_frame_if_necessary() - def save_result_value_reacq(self): + def save_result_value_reacq(self, restore_edx): """Overridden in CallBuilder32 and CallBuilder64""" raise NotImplementedError - def restore_result_value_reacq(self): + def restore_result_value_reacq(self, restore_edx): """Overridden in CallBuilder32 and CallBuilder64""" raise NotImplementedError @@ -271,7 +271,7 @@ else: CallBuilderX86.load_result(self) - def save_result_value_reacq(self): + def save_result_value_reacq(self, restore_edx): # Temporarily save the result value into [ESP+8]. We use "+8" # in order to leave the two initial words free, in case it's needed. 
# Also note that in this 32-bit case, a long long return value is @@ -283,7 +283,8 @@ # a float or a long long return if self.restype == 'L': self.mc.MOV_sr(8, eax.value) # long long - #self.mc.MOV_sr(12, edx.value) -- already done + if not restore_edx: + self.mc.MOV_sr(12, edx.value) else: self.mc.FSTPL_s(8) # float return else: @@ -294,7 +295,7 @@ assert self.ressize <= WORD self.mc.MOV_sr(8, eax.value) - def restore_result_value_reacq(self): + def restore_result_value_reacq(self, restore_edx): # Opposite of save_result_value_reacq() if self.ressize == 0: # void return return @@ -302,7 +303,8 @@ # a float or a long long return if self.restype == 'L': self.mc.MOV_rs(eax.value, 8) # long long - #self.mc.MOV_rs(edx.value, 12) -- will be done for us + if not restore_edx: + self.mc.MOV_rs(edx.value, 12) else: self.mc.FLDL_s(8) # float return else: @@ -426,7 +428,7 @@ else: CallBuilderX86.load_result(self) - def save_result_value_reacq(self): + def save_result_value_reacq(self, restore_edx): # Temporarily save the result value into [ESP]. if self.ressize == 0: # void return return @@ -444,7 +446,7 @@ assert self.restype == INT self.mc.MOV_sr(0, eax.value) - def restore_result_value_reacq(self): + def restore_result_value_reacq(self, restore_edx): # Opposite of save_result_value_reacq() if self.ressize == 0: # void return return From noreply at buildbot.pypy.org Sun Mar 9 13:57:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 13:57:21 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: Fix Message-ID: <20140309125721.A72631C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69829:d62d573ed694 Date: 2014-03-09 13:56 +0100 http://bitbucket.org/pypy/pypy/changeset/d62d573ed694/ Log: Fix diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -588,9 +588,9 @@ static void *rpy_fastgil = NULL; -Signed RPyFetchFastGil(void) +long RPyFetchFastGil(void) { - return (Signed)(&rpy_fastgil); + return (long)(&rpy_fastgil); } static inline void *atomic_xchg(void **ptr, void *value) From noreply at buildbot.pypy.org Sun Mar 9 14:06:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Mar 2014 14:06:46 +0100 (CET) Subject: [pypy-commit] pypy fast-gil: 32-bit fix Message-ID: <20140309130646.927371C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-gil Changeset: r69830:d30d2a515ee0 Date: 2014-03-09 14:06 +0100 http://bitbucket.org/pypy/pypy/changeset/d30d2a515ee0/ Log: 32-bit fix diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -170,9 +170,11 @@ # # Yes, we need to call the reopenstack() function self.save_result_value_reacq(restore_edx) - if IS_X86_32: - mc.MOV_sr(4, css_value.value) - mc.MOV_sr(0, old_value.value) + if self.asm._is_asmgcc(): + if IS_X86_32: + mc.MOV_sr(4, old_value.value) + mc.MOV_sr(0, css_value.value) + # on X86_64, they are already in the right registers mc.CALL(imm(self.asm.reacqgil_addr)) self.restore_result_value_reacq(restore_edx) # From noreply at buildbot.pypy.org Sun Mar 9 19:07:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 9 Mar 2014 19:07:28 +0100 (CET) Subject: [pypy-commit] buildbot default: try to link freebsd64 build results as well Message-ID: <20140309180728.BE7ED1C1191@cobra.cs.uni-duesseldorf.de> Author: 
Brian Kearns Branch: Changeset: r908:f08767b1bf05 Date: 2014-03-09 14:07 -0400 http://bitbucket.org/pypy/buildbot/changeset/f08767b1bf05/ Log: try to link freebsd64 build results as well diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -35,11 +35,12 @@ } PLATFORMS = { - 'linux': 'linux-x86-32', - 'linux64': 'linux-x86-64', - 'osx': 'macosx-x86-32', - 'osx64': 'macosx-x86-64', - 'win32': 'win-x86-32', + 'linux': 'linux-x86-32', + 'linux64': 'linux-x86-64', + 'osx': 'macosx-x86-32', + 'osx64': 'macosx-x86-64', + 'win32': 'win-x86-32', + 'freebsd64': 'freebsd-9-x86-64', } DESCRIPTIONS = { From noreply at buildbot.pypy.org Sun Mar 9 20:27:29 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Mar 2014 20:27:29 +0100 (CET) Subject: [pypy-commit] buildbot default: add own build for win32, lock it Message-ID: <20140309192729.B24631C1191@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r909:ffb97030dbbe Date: 2014-03-09 21:25 +0200 http://bitbucket.org/pypy/buildbot/changeset/ffb97030dbbe/ Log: add own build for win32, lock it diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -24,7 +24,7 @@ # there are 8 logical CPUs, but only 4 physical ones, and only enough memory for ~3 translations TannitCPU = locks.MasterLock('tannit_cpu', maxCount=3) SpeedPythonCPU = locks.MasterLock('speed_python_cpu', maxCount=24) -#WinLockCPU = locks.MasterLock('win_cpu', maxCount=1) +WinSlaveLock = locks.SlaveLock('win_cpu', maxCount=1) # The cross translation machine can accomodate 2 jobs at the same time ARMCrossLock = locks.MasterLock('arm_cpu', maxCount=2) diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -49,7 +49,7 @@ ARM = load('pypybuildbot.arm_master') TannitCPU = pypybuilds.TannitCPU -#WinLockCPU = pypybuilds.WinLockCPU +WinSlaveLock = pypybuilds.WinSlaveLock pypyOwnTestFactory = pypybuilds.Own() pypyOwnTestFactoryWin = pypybuilds.Own(platform="win32") @@ -216,7 +216,8 @@ APPLVLLINUX64, # on allegro64, uses 1 core # other platforms #MACOSX32, # on minime - JITWIN32, # on aurora + JITWIN32, # on aurora, SalsaSalsa + WIN32, # on aurora, SalsaSalsa #JITFREEBSD764, # on headless #JITFREEBSD864, # on ananke JITFREEBSD964, # on tavendo @@ -387,6 +388,7 @@ "slavenames": ["aurora", "SalsaSalsa"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, + "locks": [WinSlaveLock], "category": 'win32', }, {"name": WIN64, @@ -399,12 +401,14 @@ "slavenames": ["aurora", "SalsaSalsa"], "builddir": APPLVLWIN32, "factory": pypyTranslatedAppLevelTestFactoryWin, + "locks": [WinSlaveLock], "category": "win32", }, {"name" : JITWIN32, "slavenames": ["aurora", "SalsaSalsa"], 'builddir' : JITWIN32, 'factory' : pypyJITTranslatedTestFactoryWin, + "locks": [WinSlaveLock], 'category' : 'win32', }, {"name" : JITWIN64, From noreply at buildbot.pypy.org Sun Mar 9 20:27:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Mar 2014 20:27:30 +0100 (CET) Subject: [pypy-commit] buildbot default: add comment field with builder name Message-ID: <20140309192730.DE9571C1191@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r910:57b3c336e5ad Date: 2014-03-09 21:26 +0200 http://bitbucket.org/pypy/buildbot/changeset/57b3c336e5ad/ Log: add comment field with builder name diff --git a/bot2/pypybuildbot/pypylist.py 
b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -202,7 +202,7 @@ branch = '%3Ctrunk%3E' # if category: href = cgi.escape('/summary?category=%s&branch=%s&recentrev=%s' % (category, branch, rev)) - str_summary = '%s' % (href, summary) + str_summary = '%s' % (builder_name, href, summary) else: str_summary = str(summary) element[prefix + 'summary'] = str_summary From noreply at buildbot.pypy.org Sun Mar 9 20:44:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 9 Mar 2014 20:44:13 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: fix for the last merge Message-ID: <20140309194413.C71601C01F0@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69831:1392e99d3610 Date: 2014-03-09 12:43 -0700 http://bitbucket.org/pypy/pypy/changeset/1392e99d3610/ Log: fix for the last merge diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -74,7 +74,7 @@ v = unicodehelper.decode_utf8(space, substr) return space.wrap(v) - v = PyString_DecodeEscape(space, substr, 'strict', enc) + v = PyString_DecodeEscape(space, substr, 'strict', encoding) return space.wrapbytes(v) def hexbyte(val): From noreply at buildbot.pypy.org Sun Mar 9 21:09:49 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Mar 2014 21:09:49 +0100 (CET) Subject: [pypy-commit] buildbot default: Backed out changeset: 57b3c336e5ad Message-ID: <20140309200949.562FA1C01F0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r911:9d10136bfea6 Date: 2014-03-09 22:09 +0200 http://bitbucket.org/pypy/buildbot/changeset/9d10136bfea6/ Log: Backed out changeset: 57b3c336e5ad diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -202,7 +202,7 @@ branch = '%3Ctrunk%3E' # if category: href = cgi.escape('/summary?category=%s&branch=%s&recentrev=%s' % (category, branch, rev)) - str_summary = '%s' % (builder_name, href, summary) + str_summary = '%s' % (href, summary) else: str_summary = str(summary) element[prefix + 'summary'] = str_summary From noreply at buildbot.pypy.org Sun Mar 9 23:22:23 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Mar 2014 23:22:23 +0100 (CET) Subject: [pypy-commit] pypy default: minimize changes to upstream (bdk) Message-ID: <20140309222223.079501C03B3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69832:34c609b1dc8d Date: 2014-03-09 22:28 +0200 http://bitbucket.org/pypy/pypy/changeset/34c609b1dc8d/ Log: minimize changes to upstream (bdk) diff --git a/lib-python/2.7/test/test_zipfile.py b/lib-python/2.7/test/test_zipfile.py --- a/lib-python/2.7/test/test_zipfile.py +++ b/lib-python/2.7/test/test_zipfile.py @@ -421,7 +421,7 @@ zipfp.extractall() for fpath, fdata in SMALL_TEST_DATA: outfile = os.path.join(os.getcwd(), fpath) - + with open(outfile, "rb") as fid: self.assertEqual(fdata, fid.read()) os.remove(outfile) @@ -596,8 +596,6 @@ def tearDown(self): unlink(TESTFN) unlink(TESTFN2) - if os.path.exists(TESTFN): - os.remove(TESTFN) class TestZip64InSmallFiles(unittest.TestCase): @@ -717,12 +715,6 @@ class PyZipFileTests(unittest.TestCase): - def teardown(self): - if os.path.exists(TESTFN): - os.remove(TESTFN) - if os.path.exists(TESTFN2): - os.remove(TESTFN2) - def test_write_pyfile(self): with 
zipfile.PyZipFile(TemporaryFile(), "w") as zipfp: fn = __file__ @@ -1168,8 +1160,6 @@ def tearDown(self): unlink(TESTFN) unlink(TESTFN2) - if os.path.exists(TESTFN): - os.remove(TESTFN) class DecryptionTests(unittest.TestCase): @@ -1220,28 +1210,16 @@ def test_bad_password(self): self.zip.setpassword("perl") - try: - self.assertRaises(RuntimeError, self.zip.read, "test.txt") - finally: - self.zip.close() + self.assertRaises(RuntimeError, self.zip.read, "test.txt") self.zip2.setpassword("perl") - try: - self.assertRaises(RuntimeError, self.zip2.read, "zero") - finally: - self.zip2.close() + self.assertRaises(RuntimeError, self.zip2.read, "zero") @skipUnless(zlib, "requires zlib") def test_good_password(self): self.zip.setpassword("python") - try: - self.assertEqual(self.zip.read("test.txt"), self.plain) - finally: - self.zip.close() + self.assertEqual(self.zip.read("test.txt"), self.plain) self.zip2.setpassword("12345") - try: - self.assertEqual(self.zip2.read("zero"), self.plain2) - finally: - self.zip2.close() + self.assertEqual(self.zip2.read("zero"), self.plain2) class TestsWithRandomBinaryFiles(unittest.TestCase): @@ -1255,10 +1233,8 @@ fp.write(self.data) def tearDown(self): - if os.path.exists(TESTFN): - os.remove(TESTFN) - if os.path.exists(TESTFN2): - os.remove(TESTFN2) + unlink(TESTFN) + unlink(TESTFN2) def make_test_archive(self, f, compression): # Create the ZIP archive @@ -1401,8 +1377,6 @@ zipf.read('ones') with zipf.open('ones') as zopen1: pass - for x in range(10): - self.assertLess(open('/dev/null').fileno(), 100) def tearDown(self): unlink(TESTFN2) @@ -1433,7 +1407,7 @@ def tearDown(self): rmtree(TESTFN2) if os.path.exists(TESTFN): - os.remove(TESTFN) + unlink(TESTFN) class UniversalNewlineTests(unittest.TestCase): From noreply at buildbot.pypy.org Sun Mar 9 23:22:24 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Mar 2014 23:22:24 +0100 (CET) Subject: [pypy-commit] pypy default: no longer needed Message-ID: <20140309222224.65D151C03B3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69833:f43ca379c4c9 Date: 2014-03-09 23:17 +0200 http://bitbucket.org/pypy/pypy/changeset/f43ca379c4c9/ Log: no longer needed diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -113,8 +113,6 @@ if msvc_compiler_environ: self.c_environ = os.environ.copy() self.c_environ.update(msvc_compiler_environ) - # XXX passing an environment to subprocess is not enough. Why? 
- os.environ.update(msvc_compiler_environ) # detect version of current compiler returncode, stdout, stderr = _run_subprocess(self.cc, '', From noreply at buildbot.pypy.org Mon Mar 10 00:20:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 10 Mar 2014 00:20:26 +0100 (CET) Subject: [pypy-commit] buildbot default: fix locking Message-ID: <20140309232026.50C761C03B3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r912:0d86998585e3 Date: 2014-03-10 01:19 +0200 http://bitbucket.org/pypy/buildbot/changeset/0d86998585e3/ Log: fix locking diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -388,7 +388,7 @@ "slavenames": ["aurora", "SalsaSalsa"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, - "locks": [WinSlaveLock], + "locks": [WinSlaveLock.access('counting')], "category": 'win32', }, {"name": WIN64, @@ -401,14 +401,14 @@ "slavenames": ["aurora", "SalsaSalsa"], "builddir": APPLVLWIN32, "factory": pypyTranslatedAppLevelTestFactoryWin, - "locks": [WinSlaveLock], + "locks": [WinSlaveLock.access('counting')], "category": "win32", }, {"name" : JITWIN32, "slavenames": ["aurora", "SalsaSalsa"], 'builddir' : JITWIN32, 'factory' : pypyJITTranslatedTestFactoryWin, - "locks": [WinSlaveLock], + "locks": [WinSlaveLock.access('counting')], 'category' : 'win32', }, {"name" : JITWIN64, From noreply at buildbot.pypy.org Mon Mar 10 08:06:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Mar 2014 08:06:54 +0100 (CET) Subject: [pypy-commit] pypy default: Give up and return a regular int from compute_unique_id(), with an explanation. Message-ID: <20140310070654.849B61C1178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69834:3a69dad4faf9 Date: 2014-03-10 08:06 +0100 http://bitbucket.org/pypy/pypy/changeset/3a69dad4faf9/ Log: Give up and return a regular int from compute_unique_id(), with an explanation. diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -432,7 +432,11 @@ costly depending on the garbage collector. To remind you of this fact, we don't support id(x) directly. """ - return id(x) # XXX need to return r_longlong on some platforms + # The assumption with RPython is that a regular integer is wide enough + # to store a pointer. The following intmask() should not loose any + # information. + from rpython.rlib.rarithmetic import intmask + return intmask(id(x)) def current_object_addr_as_int(x): """A cheap version of id(x). diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -177,10 +177,13 @@ assert h == getattr(foo, '__precomputed_identity_hash') def test_compute_unique_id(): + from rpython.rlib.rarithmetic import intmask class Foo(object): pass foo = Foo() - assert compute_unique_id(foo) == id(foo) + x = compute_unique_id(foo) + assert type(x) is int + assert x == intmask(id(foo)) def test_current_object_addr_as_int(): from rpython.rlib.rarithmetic import intmask From noreply at buildbot.pypy.org Mon Mar 10 18:18:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Mar 2014 18:18:09 +0100 (CET) Subject: [pypy-commit] stmgc default: Use "long" here, like the result type of stm_hash(). 
Message-ID: <20140310171809.B8C701D2576@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r970:705cfd3ba3a9 Date: 2014-03-10 18:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/705cfd3ba3a9/ Log: Use "long" here, like the result type of stm_hash(). diff --git a/c7/stm/hash_id.c b/c7/stm/hash_id.c --- a/c7/stm/hash_id.c +++ b/c7/stm/hash_id.c @@ -55,7 +55,7 @@ return id_or_identityhash(obj, true); } -void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash) +void stm_set_prebuilt_identityhash(object_t *obj, long hash) { struct object_s *realobj = (struct object_s *) REAL_ADDRESS(stm_object_pages, obj); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -267,7 +267,7 @@ controlled for each prebuilt object individually. (Useful uor PyPy) */ long stm_identityhash(object_t *obj); long stm_id(object_t *obj); -void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); +void stm_set_prebuilt_identityhash(object_t *obj, long hash); /* Returns 1 if the object can still move (it's in the nursery), or 0 otherwise. After a minor collection no object can move any more. */ From noreply at buildbot.pypy.org Mon Mar 10 18:40:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Mar 2014 18:40:50 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Generate prebuilt objects with the expected hashes. Message-ID: <20140310174050.42E451C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69837:27a3cd675ae1 Date: 2014-03-10 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/27a3cd675ae1/ Log: Generate prebuilt objects with the expected hashes. diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -468,6 +468,12 @@ def get_prebuilt_hash(self, obj): return None # done differently with the stmgc + def get_stm_prebuilt_hash(self, obj): + h = BasicFrameworkGcPolicy.get_prebuilt_hash(self, obj) + if h is None: + h = object.__hash__(obj) # a "random enough" number + return h + name_to_gcpolicy = { 'boehm': BoehmGcPolicy, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -799,7 +799,6 @@ if database.with_stm: print >> f print >> f, 'extern object_t *rpy_prebuilt[];' - print >> f, 'extern long rpy_prebuilt_hashes[];' print >> f for node in database.globalcontainers(): for line in node.forward_declaration(): @@ -845,6 +844,8 @@ print >> f, '}' def gen_stm_prebuilt(f, database): + from rpython.translator.c.primitive import name_signed + # print >> f, '#include "common_header.h"' print >> f, '#include "structdef.h"' print >> f, '#include "forwarddecl.h"' @@ -858,15 +859,21 @@ print >> f, '\t(object_t *)&%s,' % node.name print >> f, '\tNULL' print >> f, '};' - print >> f, '/* long rpy_prebuilt_hashes[] = { ... 
}; */' # XXX + print >> f, 'static long rpy_prebuilt_hashes[] = {' + for _, node in gclist: + h = database.gcpolicy.get_stm_prebuilt_hash(node.obj) + print >> f, '\t%s,' % (name_signed(h, database),) + print >> f, '};' print >> f, ''' void pypy_stm_setup(void) { stm_setup(); - object_t **pp; - for (pp = rpy_prebuilt; *pp; pp++) { + object_t **pp = rpy_prebuilt; + long *ph = rpy_prebuilt_hashes; + for ( ; *pp; pp++, ph++) { *pp = stm_setup_prebuilt(*pp); + stm_set_prebuilt_identityhash(*pp, *ph); } stm_register_thread_local(&stm_thread_local); From noreply at buildbot.pypy.org Mon Mar 10 19:19:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 10 Mar 2014 19:19:52 +0100 (CET) Subject: [pypy-commit] pypy default: optimizations for datetime, only calculate hash once Message-ID: <20140310181952.4C9411C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69838:dd4bd92478ca Date: 2014-03-10 14:17 -0400 http://bitbucket.org/pypy/pypy/changeset/dd4bd92478ca/ Log: optimizations for datetime, only calculate hash once diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -431,7 +431,7 @@ Representation: (days, seconds, microseconds). Why? Because I felt like it. """ - __slots__ = '_days', '_seconds', '_microseconds' + __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0): @@ -525,14 +525,14 @@ assert isinstance(s, int) and 0 <= s < 24*3600 assert isinstance(us, int) and 0 <= us < 1000000 + if abs(d) > 999999999: + raise OverflowError("timedelta # of days is too large: %d" % d) + self = object.__new__(cls) - self._days = d self._seconds = s self._microseconds = us - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) - + self._hashcode = -1 return self def __repr__(self): @@ -687,7 +687,9 @@ return _cmp(self._getstate(), other._getstate()) def __hash__(self): - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode def __nonzero__(self): return (self._days != 0 or @@ -735,7 +737,7 @@ Properties (readonly): year, month, day """ - __slots__ = '_year', '_month', '_day' + __slots__ = '_year', '_month', '_day', '_hashcode' def __new__(cls, year, month=None, day=None): """Constructor. @@ -749,12 +751,14 @@ # Pickle support self = object.__new__(cls) self.__setstate(year) + self._hashcode = -1 return self year, month, day = _check_date_fields(year, month, day) self = object.__new__(cls) self._year = year self._month = month self._day = day + self._hashcode = -1 return self # Additional constructors @@ -936,7 +940,9 @@ def __hash__(self): "Hash." - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode # Computations @@ -1122,7 +1128,7 @@ Properties (readonly): hour, minute, second, microsecond, tzinfo """ - __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo' + __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode' def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Constructor. 
@@ -1137,6 +1143,7 @@ # Pickle support self = object.__new__(cls) self.__setstate(hour, minute or None) + self._hashcode = -1 return self hour, minute, second, microsecond = _check_time_fields( hour, minute, second, microsecond) @@ -1147,6 +1154,7 @@ self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1242,13 +1250,17 @@ def __hash__(self): """Hash.""" - tzoff = self._utcoffset() - if not tzoff: # zero or None - return hash(self._getstate()[0]) - h, m = divmod(self.hour * 60 + self.minute - tzoff, 60) - if 0 <= h < 24: - return hash(time(h, m, self.second, self.microsecond)) - return hash((h, m, self.second, self.microsecond)) + if self._hashcode == -1: + tzoff = self._utcoffset() + if not tzoff: # zero or None + self._hashcode = hash(self._getstate()[0]) + else: + h, m = divmod(self.hour * 60 + self.minute - tzoff, 60) + if 0 <= h < 24: + self._hashcode = hash(time(h, m, self.second, self.microsecond)) + else: + self._hashcode = hash((h, m, self.second, self.microsecond)) + return self._hashcode # Conversion to string @@ -1408,14 +1420,13 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") self._hour, self._minute, self._second, us1, us2, us3 = ( ord(string[0]), ord(string[1]), ord(string[2]), ord(string[3]), ord(string[4]), ord(string[5])) self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg") + self._tzinfo = tzinfo def __reduce__(self): return (time, self._getstate()) @@ -1439,8 +1450,9 @@ if isinstance(year, bytes) and len(year) == 10 and \ 1 <= ord(year[2]) <= 12: # Pickle support - self = date.__new__(cls, year[:4]) + self = object.__new__(cls) self.__setstate(year, month) + self._hashcode = -1 return self year, month, day = _check_date_fields(year, month, day) hour, minute, second, microsecond = _check_time_fields( @@ -1455,6 +1467,7 @@ self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1876,12 +1889,15 @@ return base + timedelta(minutes = otoff-myoff) def __hash__(self): - tzoff = self._utcoffset() - if tzoff is None: - return hash(self._getstate()[0]) - days = _ymd2ord(self.year, self.month, self.day) - seconds = self.hour * 3600 + (self.minute - tzoff) * 60 + self.second - return hash(timedelta(days, seconds, self.microsecond)) + if self._hashcode == -1: + tzoff = self._utcoffset() + if tzoff is None: + self._hashcode = hash(self._getstate()[0]) + else: + days = _ymd2ord(self.year, self.month, self.day) + seconds = self.hour * 3600 + (self.minute - tzoff) * 60 + self.second + self._hashcode = hash(timedelta(days, seconds, self.microsecond)) + return self._hashcode # Pickle support. 
@@ -1898,6 +1914,8 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") (yhi, ylo, self._month, self._day, self._hour, self._minute, self._second, us1, us2, us3) = (ord(string[0]), ord(string[1]), ord(string[2]), ord(string[3]), @@ -1905,10 +1923,7 @@ ord(string[7]), ord(string[8]), ord(string[9])) self._year = yhi * 256 + ylo self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg") + self._tzinfo = tzinfo def __reduce__(self): return (self.__class__, self._getstate()) From noreply at buildbot.pypy.org Mon Mar 10 19:30:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 10 Mar 2014 19:30:28 +0100 (CET) Subject: [pypy-commit] pypy py3k: optimizations for datetime, only calculate hash once Message-ID: <20140310183028.D08E21C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r69839:7de8ab2d6960 Date: 2014-03-10 14:29 -0400 http://bitbucket.org/pypy/pypy/changeset/7de8ab2d6960/ Log: optimizations for datetime, only calculate hash once diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -333,7 +333,7 @@ Representation: (days, seconds, microseconds). Why? Because I felt like it. """ - __slots__ = '_days', '_seconds', '_microseconds' + __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0): @@ -426,14 +426,14 @@ assert isinstance(s, int) and 0 <= s < 24*3600 assert isinstance(us, int) and 0 <= us < 1000000 + if abs(d) > 999999999: + raise OverflowError("timedelta # of days is too large: %d" % d) + self = object.__new__(cls) - self._days = d self._seconds = s self._microseconds = us - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) - + self._hashcode = -1 return self def __repr__(self): @@ -617,7 +617,9 @@ return _cmp(self._getstate(), other._getstate()) def __hash__(self): - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode def __bool__(self): return (self._days != 0 or @@ -665,7 +667,7 @@ Properties (readonly): year, month, day """ - __slots__ = '_year', '_month', '_day' + __slots__ = '_year', '_month', '_day', '_hashcode' def __new__(cls, year, month=None, day=None): """Constructor. @@ -679,12 +681,14 @@ # Pickle support self = object.__new__(cls) self.__setstate(year) + self._hashcode = -1 return self year, month, day = _check_date_fields(year, month, day) self = object.__new__(cls) self._year = year self._month = month self._day = day + self._hashcode = -1 return self # Additional constructors @@ -847,7 +851,9 @@ def __hash__(self): "Hash." - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode # Computations @@ -1022,7 +1028,7 @@ Properties (readonly): hour, minute, second, microsecond, tzinfo """ - __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo' + __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode' def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Constructor. 
@@ -1037,6 +1043,7 @@ # Pickle support self = object.__new__(cls) self.__setstate(hour, minute or None) + self._hashcode = -1 return self hour, minute, second, microsecond = _check_time_fields( hour, minute, second, microsecond) @@ -1047,6 +1054,7 @@ self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1142,16 +1150,20 @@ def __hash__(self): """Hash.""" - tzoff = self.utcoffset() - if not tzoff: # zero or None - return hash(self._getstate()[0]) - h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, - timedelta(hours=1)) - assert not m % timedelta(minutes=1), "whole minute" - m //= timedelta(minutes=1) - if 0 <= h < 24: - return hash(time(h, m, self.second, self.microsecond)) - return hash((h, m, self.second, self.microsecond)) + if self._hashcode == -1: + tzoff = self.utcoffset() + if not tzoff: # zero or None + self._hashcode = hash(self._getstate()[0]) + else: + h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, + timedelta(hours=1)) + assert not m % timedelta(minutes=1), "whole minute" + m //= timedelta(minutes=1) + if 0 <= h < 24: + self._hashcode = hash(time(h, m, self.second, self.microsecond)) + else: + self._hashcode = hash((h, m, self.second, self.microsecond)) + return self._hashcode # Conversion to string @@ -1292,12 +1304,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") self._hour, self._minute, self._second, us1, us2, us3 = string self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg") + self._tzinfo = tzinfo def __reduce__(self): return (time, self._getstate()) @@ -1320,8 +1331,9 @@ microsecond=0, tzinfo=None): if isinstance(year, bytes) and len(year) == 10 and 1 <= year[2] <= 12: # Pickle support - self = date.__new__(cls, year[:4]) + self = object.__new__(cls) self.__setstate(year, month) + self._hashcode = -1 return self year, month, day = _check_date_fields(year, month, day) hour, minute, second, microsecond = _check_time_fields( @@ -1336,6 +1348,7 @@ self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1736,12 +1749,15 @@ return base + otoff - myoff def __hash__(self): - tzoff = self.utcoffset() - if tzoff is None: - return hash(self._getstate()[0]) - days = _ymd2ord(self.year, self.month, self.day) - seconds = self.hour * 3600 + self.minute * 60 + self.second - return hash(timedelta(days, seconds, self.microsecond) - tzoff) + if self._hashcode == -1: + tzoff = self.utcoffset() + if tzoff is None: + self._hashcode = hash(self._getstate()[0]) + else: + days = _ymd2ord(self.year, self.month, self.day) + seconds = self.hour * 3600 + self.minute * 60 + self.second + self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff) + return self._hashcode # Pickle support. 
@@ -1758,14 +1774,13 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") (yhi, ylo, self._month, self._day, self._hour, self._minute, self._second, us1, us2, us3) = string self._year = yhi * 256 + ylo self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg") + self._tzinfo = tzinfo def __reduce__(self): return (self.__class__, self._getstate()) From noreply at buildbot.pypy.org Mon Mar 10 19:48:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 10 Mar 2014 19:48:57 +0100 (CET) Subject: [pypy-commit] pypy default: more test_zipfile cleanups Message-ID: <20140310184857.089CE1C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69840:f53ea2f776fe Date: 2014-03-10 14:48 -0400 http://bitbucket.org/pypy/pypy/changeset/f53ea2f776fe/ Log: more test_zipfile cleanups diff --git a/lib-python/2.7/test/test_zipfile.py b/lib-python/2.7/test/test_zipfile.py --- a/lib-python/2.7/test/test_zipfile.py +++ b/lib-python/2.7/test/test_zipfile.py @@ -779,8 +779,6 @@ rmtree(TESTFN2) def test_write_non_pyfile(self): - if os.path.exists(TESTFN): - os.remove(TESTFN) with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp: with open(TESTFN, 'w') as fid: fid.write('most definitely not a python file') @@ -1377,6 +1375,8 @@ zipf.read('ones') with zipf.open('ones') as zopen1: pass + with open(os.devnull) as f: + self.assertLess(f.fileno(), 100) def tearDown(self): unlink(TESTFN2) From noreply at buildbot.pypy.org Mon Mar 10 19:57:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 10 Mar 2014 19:57:28 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_kqueue for new bounds checking Message-ID: <20140310185728.8B9371C1178@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69841:096eef91e0a0 Date: 2014-03-10 11:56 -0700 http://bitbucket.org/pypy/pypy/changeset/096eef91e0a0/ Log: fix test_kqueue for new bounds checking diff --git a/pypy/module/select/test/test_kqueue.py b/pypy/module/select/test/test_kqueue.py --- a/pypy/module/select/test/test_kqueue.py +++ b/pypy/module/select/test/test_kqueue.py @@ -74,7 +74,7 @@ assert ev != other bignum = (sys.maxsize * 2 + 1) & 0xffffffff - fd = sys.maxsize + fd = 2**31 - 1 ev = select.kevent(fd, 1, 2, bignum, sys.maxsize, bignum) assert ev.ident == fd assert ev.filter == 1 @@ -85,6 +85,9 @@ assert ev == ev assert ev != other + exc = raises(ValueError, select.kevent, fd + 1, 1, 2, bignum, sys.maxsize, bignum) + assert exc.value[0] == "file descriptor cannot be a negative integer (-1)" + def test_queue_event(self): import errno import select From noreply at buildbot.pypy.org Mon Mar 10 22:00:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 10 Mar 2014 22:00:45 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: we assume the larger MAXREPEAT now, skip behavior not applicable to 3.2.3 Message-ID: <20140310210045.C70341C03B3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69842:b6d5cac0138a Date: 2014-03-10 13:59 -0700 http://bitbucket.org/pypy/pypy/changeset/b6d5cac0138a/ Log: we assume the larger MAXREPEAT now, skip behavior not applicable to 3.2.3 diff --git a/lib-python/3/sre_constants.py b/lib-python/3/sre_constants.py --- a/lib-python/3/sre_constants.py +++ 
b/lib-python/3/sre_constants.py @@ -15,9 +15,11 @@ MAGIC = 20031017 -# max code word in this release - -MAXREPEAT = 65535 +try: + from _sre import MAXREPEAT +except ImportError: + import _sre + MAXREPEAT = _sre.MAXREPEAT = 65535 # SRE standard exception (access as sre.error) # should this really be here? diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -60,6 +60,10 @@ assert re.match(r".{%d}" % (self.s.MAXREPEAT - 1), string) is None assert re.match(r".{,%d}" % (self.s.MAXREPEAT - 1), string).span() == (0, 100000) assert re.match(r".{%d,}?" % (self.s.MAXREPEAT - 1), string) is None + import sys + if sys.version_info[:3] <= (3, 2, 3): + # XXX: These are fixed in 3.2.4 or so + return raises(OverflowError, re.compile, r".{%d}" % self.s.MAXREPEAT) raises(OverflowError, re.compile, r".{,%d}" % self.s.MAXREPEAT) raises(OverflowError, re.compile, r".{%d,}?" % self.s.MAXREPEAT) From noreply at buildbot.pypy.org Mon Mar 10 22:16:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Mar 2014 22:16:41 +0100 (CET) Subject: [pypy-commit] pypy default: Fix xmlcharrefreplace_errors in the same way as it was fixed between Message-ID: <20140310211641.D9EA61C1178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69843:cc1f466981ae Date: 2014-03-10 22:15 +0100 http://bitbucket.org/pypy/pypy/changeset/cc1f466981ae/ Log: Fix xmlcharrefreplace_errors in the same way as it was fixed between CPython 2.7.3 and 2.7.6. Hard to test before translation due to a mess with host-vs-interpreted maxunicode. diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -1,7 +1,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rstring import UnicodeBuilder -from rpython.rlib.runicode import UNICHR +from rpython.rlib.runicode import UNICHR, MAXUNICODE from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault @@ -228,9 +228,15 @@ builder = UnicodeBuilder() pos = start while pos < end: - ch = obj[pos] + code = ord(obj[pos]) + if (MAXUNICODE == 0xffff and 0xD800 <= code <= 0xDBFF and + pos + 1 < end and 0xDC00 <= ord(obj[pos+1]) <= 0xDFFF): + code = (code & 0x03FF) << 10 + code |= ord(obj[pos+1]) & 0x03FF + code += 0x10000 + pos += 1 builder.append(u"&#") - builder.append(unicode(str(ord(ch)))) + builder.append(unicode(str(code))) builder.append(u";") pos += 1 return space.newtuple([space.wrap(builder.build()), w_end]) From noreply at buildbot.pypy.org Mon Mar 10 22:50:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 10 Mar 2014 22:50:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix size of numpy unicode dtype Message-ID: <20140310215014.18EF11C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69844:4ca307beabf3 Date: 2014-03-10 17:49 -0400 http://bitbucket.org/pypy/pypy/changeset/4ca307beabf3/ Log: fix size of numpy unicode dtype diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1738,7 +1738,10 @@ self._store(storage, i, offset, box, width) class UnicodeType(FlexibleType): - T = lltype.UniChar + T = lltype.Char + + def get_element_size(self): + return 4 # always UTF-32 
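Two quick checks related to the fixes above, written here for illustration only (not part of either changeset; the second needs NumPy installed). The first reproduces the surrogate-pair recombination added to xmlcharrefreplace_errors for narrow-unicode builds, the second checks the UTF-32 element size that the micronumpy fix mirrors:

    def combine_surrogates(hi, lo):
        # a narrow-unicode build represents a non-BMP character as two
        # 16-bit surrogates; fold them back into one code point before
        # emitting the &#...; character reference
        assert 0xD800 <= hi <= 0xDBFF and 0xDC00 <= lo <= 0xDFFF
        return (((hi & 0x03FF) << 10) | (lo & 0x03FF)) + 0x10000

    assert combine_surrogates(0xD83D, 0xDE00) == 0x1F600   # U+1F600

    import numpy
    assert numpy.dtype('U5').itemsize == 5 * 4   # 'U' items are always UTF-32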
@jit.unroll_safe def coerce(self, space, dtype, w_item): From noreply at buildbot.pypy.org Tue Mar 11 00:53:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 11 Mar 2014 00:53:46 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: fix these tests when run -A Message-ID: <20140310235346.3BDEA1C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-nditer Changeset: r69847:48ab6eff26dc Date: 2014-03-10 19:53 -0400 http://bitbucket.org/pypy/pypy/changeset/48ab6eff26dc/ Log: fix these tests when run -A diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -138,7 +138,7 @@ if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, nditer, a, op_dtypes=['complex']) skip('nditer op_dtypes kwarg not implemented yet') - exc = raises(ValueError, nditer, a, op_dtypes=['complex']) + exc = raises(TypeError, nditer, a, op_dtypes=['complex']) assert str(exc.value).startswith("Iterator operand required copying or buffering") r = [] for x in nditer(a, op_flags=['readonly','copy'], @@ -160,19 +160,19 @@ if '__pypy__' in sys.builtin_module_names: raises(NotImplementedError, nditer, a, flags=['buffered'], op_dtypes=['float32']) skip('nditer casting not implemented yet') - exc = raises(ValueError, nditer, a, flags=['buffered'], op_dtypes=['float32']) + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['float32']) assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") r = [] for x in nditer(a, flags=['buffered'], op_dtypes=['float32'], casting='same_kind'): r.append(x) assert r == [0., 1., 2., 3., 4., 5.] - exc = raises(ValueError, nditer, a, flags=['buffered'], + exc = raises(TypeError, nditer, a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind') assert str(exc.value).startswith("Iterator operand 0 dtype could not be cast") r = [] b = arange(6) - exc = raises(ValueError, nditer, b, flags=['buffered'], op_dtypes=['float64'], + exc = raises(TypeError, nditer, b, flags=['buffered'], op_dtypes=['float64'], op_flags=['readwrite'], casting='same_kind') assert str(exc.value).startswith("Iterator requested dtype could not be cast") From noreply at buildbot.pypy.org Tue Mar 11 00:51:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 11 Mar 2014 00:51:25 +0100 (CET) Subject: [pypy-commit] pypy default: unused Message-ID: <20140310235125.B7E4D1C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69845:3eb83f4075a3 Date: 2014-03-10 19:47 -0400 http://bitbucket.org/pypy/pypy/changeset/3eb83f4075a3/ Log: unused diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import oefmt from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import widen From noreply at buildbot.pypy.org Tue Mar 11 03:43:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 11 Mar 2014 03:43:32 +0100 (CET) Subject: [pypy-commit] pypy default: fix charmap_decode on win32 Message-ID: <20140311024332.6FECA1C01F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69848:71f5a2515344 Date: 2014-03-10 22:42 -0400 http://bitbucket.org/pypy/pypy/changeset/71f5a2515344/ Log: fix 
charmap_decode on win32 diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -1,7 +1,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rstring import UnicodeBuilder -from rpython.rlib.runicode import UNICHR, MAXUNICODE +from rpython.rlib.runicode import code_to_unichr, MAXUNICODE from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault @@ -541,7 +541,7 @@ if not 0 <= x <= 0x10FFFF: raise oefmt(space.w_TypeError, "character mapping must be in range(0x110000)") - return UNICHR(x) + return code_to_unichr(x) elif space.is_w(w_ch, space.w_None): # Charmap may return None return errorchar From noreply at buildbot.pypy.org Tue Mar 11 09:33:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 09:33:37 +0100 (CET) Subject: [pypy-commit] pypy default: A potential way to implement better "x in constant-tuple"? Message-ID: <20140311083337.603321C11A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69849:2c20730cc1bd Date: 2014-03-11 09:32 +0100 http://bitbucket.org/pypy/pypy/changeset/2c20730cc1bd/ Log: A potential way to implement better "x in constant-tuple"? diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3970,3 +3970,11 @@ return a.x res = self.interp_operations(f, [42]) assert res == 0 + + def test_conditions_without_guards(self): + def f(n): + if (n == 1) | (n == 3) | (n == 17): + return 42 + return 5 + res = self.interp_operations(f, [17]) + assert res == 42 From noreply at buildbot.pypy.org Tue Mar 11 09:34:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 09:34:39 +0100 (CET) Subject: [pypy-commit] pypy default: Check that we only get one guard. Message-ID: <20140311083439.A92D31C11A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69850:5b867d590595 Date: 2014-03-11 09:34 +0100 http://bitbucket.org/pypy/pypy/changeset/5b867d590595/ Log: Check that we only get one guard. diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3978,3 +3978,4 @@ return 5 res = self.interp_operations(f, [17]) assert res == 42 + self.check_operations_history(guard_true=1, guard_false=0) From noreply at buildbot.pypy.org Tue Mar 11 09:59:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 09:59:56 +0100 (CET) Subject: [pypy-commit] pypy default: Implement "x in (constant-tuple)" not by doing a dictionary lookup, Message-ID: <20140311085956.2BFCC1C11A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69851:c823adbf0d32 Date: 2014-03-11 09:59 +0100 http://bitbucket.org/pypy/pypy/changeset/c823adbf0d32/ Log: Implement "x in (constant-tuple)" not by doing a dictionary lookup, but instead as a chain of equalities. 
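A pure-Python model of the lowering described in the log message above (the RPython diff follows below): membership in a constant tuple is emitted as a chain of equality tests combined with the non-short-circuiting "|", so the trace contains a single guard on the combined boolean instead of a dictionary lookup. Illustrative only; the real implementation is in rtuple.py.

    def in_constant_tuple(x):
        # what the rtyper now emits, conceptually, for:  x in (1, 3, 17)
        return bool((x == 1) | (x == 3) | (x == 17))

    assert in_constant_tuple(17) is True
    assert in_constant_tuple(5) is False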
diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -290,20 +290,23 @@ if not s_tup.is_constant(): raise TyperError("contains() on non-const tuple") t = s_tup.const - typ = type(t[0]) - for x in t[1:]: - if type(x) is not typ: - raise TyperError("contains() on mixed-type tuple " - "constant %r" % (t,)) - d = {} + if len(t) == 0: + hop.exception_cannot_occur() + return hop.inputconst(Bool, False) + r_item = hop.args_r[1] + v_arg = hop.inputarg(r_item, arg=1) + ll_eq = r_item.get_ll_eq_function() or _ll_equal + v_result = None for x in t: - d[x] = None - hop2 = hop.copy() - _, _ = hop2.r_s_popfirstarg() - v_dict = Constant(d) - s_dict = hop.rtyper.annotator.bookkeeper.immutablevalue(d) - hop2.v_s_insertfirstarg(v_dict, s_dict) - return hop2.dispatch() + c_tuple_item = hop.inputconst(r_item, x) + v_equal = hop.gendirectcall(ll_eq, v_arg, c_tuple_item) + if v_result is None: + v_result = v_equal + else: + v_result = hop.genop("int_or", [v_result, v_equal], + resulttype = Bool) + hop.exception_cannot_occur() + return v_result or hop.inputconst(Bool, False) class __extend__(pairtype(TupleRepr, TupleRepr)): @@ -400,3 +403,6 @@ return t.item0 else: raise StopIteration + +def _ll_equal(x, y): + return x == y diff --git a/rpython/rtyper/test/test_rtuple.py b/rpython/rtyper/test/test_rtuple.py --- a/rpython/rtyper/test/test_rtuple.py +++ b/rpython/rtyper/test/test_rtuple.py @@ -73,6 +73,20 @@ res = self.interpret(f, [0]) assert res is False + def test_constant_tuple_contains3(self): + def f(i): + return i in () + res = self.interpret(f, [3]) + assert res is False + + def test_constant_tuple_contains4(self): + def f(i): + return i in (3,) + res = self.interpret(f, [3]) + assert res is True + res = self.interpret(f, [4]) + assert res is False + def test_constant_unichar_tuple_contains(self): def f(i): return unichr(i) in (u'1', u'9') From noreply at buildbot.pypy.org Tue Mar 11 10:05:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:05:37 +0100 (CET) Subject: [pypy-commit] stmgc default: Clear bytes of raw memory on abort Message-ID: <20140311090537.414051C314C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r971:58c5e282befd Date: 2014-03-11 08:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/58c5e282befd/ Log: Clear bytes of raw memory on abort diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -498,6 +498,11 @@ stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + /* clear memory registered on the thread-local */ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + if (tl->mem_clear_on_abort) + memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); + if (STM_SEGMENT->nursery_end == NSE_SIGABORT) STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -53,6 +53,10 @@ object_t **shadowstack, **shadowstack_base; /* a generic optional thread-local object */ object_t *thread_local_obj; + /* in case this thread runs a transaction that aborts, + the following raw region of memory is cleared. 
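A small Python model of the clear-on-abort contract declared just above (the real field points at raw C memory reachable from the thread-local struct; a bytearray stands in for it here, and the names are illustrative):

    class ThreadLocalModel:
        def __init__(self):
            self.mem_clear_on_abort = None         # region to wipe, or None
            self.mem_bytes_to_clear_on_abort = 0

    def abort(tl):
        # on abort, the registered region is zeroed before unwinding
        if tl.mem_clear_on_abort is not None:
            n = tl.mem_bytes_to_clear_on_abort
            tl.mem_clear_on_abort[:n] = b'\x00' * n

    tl = ThreadLocalModel()
    tl.mem_clear_on_abort = bytearray(b'hello')
    tl.mem_bytes_to_clear_on_abort = 2
    abort(tl)
    assert tl.mem_clear_on_abort == bytearray(b'\x00\x00llo')

This mirrors the test_clear_on_abort test added in the same changeset: only the first mem_bytes_to_clear_on_abort bytes are cleared.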
*/ + char *mem_clear_on_abort; + size_t mem_bytes_to_clear_on_abort; /* the next fields are handled automatically by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -15,6 +15,8 @@ typedef struct { object_t **shadowstack, **shadowstack_base; object_t *thread_local_obj; + char *mem_clear_on_abort; + size_t mem_bytes_to_clear_on_abort; int associated_segment_num; ...; } stm_thread_local_t; @@ -405,6 +407,9 @@ lib.stm_unregister_thread_local(tl) lib.stm_teardown() + def get_stm_thread_local(self): + return self.tls[self.current_thread] + def start_transaction(self): tl = self.tls[self.current_thread] assert not lib._stm_in_transaction(tl) diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py new file mode 100644 --- /dev/null +++ b/c7/test/test_extra.py @@ -0,0 +1,19 @@ +from support import * +import py + +class TestExtra(BaseTest): + + def test_clear_on_abort(self): + p = ffi.new("char[]", "hello") + tl = self.get_stm_thread_local() + tl.mem_clear_on_abort = p + tl.mem_bytes_to_clear_on_abort = 2 + # + self.start_transaction() + assert ffi.string(p) == "hello" + self.abort_transaction() + assert p[0] == '\0' + assert p[1] == '\0' + assert p[2] == 'l' + assert p[3] == 'l' + assert p[4] == 'o' From noreply at buildbot.pypy.org Tue Mar 11 10:05:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:05:38 +0100 (CET) Subject: [pypy-commit] stmgc default: Implementation and test for stm_call_on_abort(). Message-ID: <20140311090538.7DEB41C314C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r972:d912ca6f0d53 Date: 2014-03-11 10:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/d912ca6f0d53/ Log: Implementation and test for stm_call_on_abort(). diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -180,6 +180,7 @@ assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); + assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); @@ -378,6 +379,8 @@ STM_PSEGMENT->overflow_number_has_been_used = false; } + clear_callbacks_on_abort(); + /* send what is hopefully the correct signals */ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* wake up one thread in wait_for_end_of_inevitable_transaction() */ @@ -503,6 +506,9 @@ if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); + /* invoke the callbacks */ + invoke_and_clear_callbacks_on_abort(); + if (STM_SEGMENT->nursery_end == NSE_SIGABORT) STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ @@ -542,6 +548,7 @@ wait_for_end_of_inevitable_transaction(true); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; + clear_callbacks_on_abort(); } s_mutex_unlock(); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -105,6 +105,9 @@ next minor collection. 
*/ struct tree_s *nursery_objects_shadows; + /* Tree of 'key->callback' associations from stm_call_on_abort() */ + struct tree_s *callbacks_on_abort; + /* Start time: to know approximately for how long a transaction has been running, in contention management */ uint64_t start_time; diff --git a/c7/stm/extra.c b/c7/stm/extra.c new file mode 100644 --- /dev/null +++ b/c7/stm/extra.c @@ -0,0 +1,55 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +void stm_call_on_abort(void *key, void callback(void *)) +{ + assert(_running_transaction()); + + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + /* ignore callbacks if we're in an inevitable transaction + (which cannot abort) */ + return; + } + + if (callback == NULL) { + /* ignore the return value: unregistered keys can be + "deleted" again */ + tree_delete_item(STM_PSEGMENT->callbacks_on_abort, (uintptr_t)key); + } + else { + /* double-registering the same key will crash */ + tree_insert(STM_PSEGMENT->callbacks_on_abort, + (uintptr_t)key, (uintptr_t)callback); + } +} + +static void clear_callbacks_on_abort(void) +{ + if (!tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)) + tree_clear(STM_PSEGMENT->callbacks_on_abort); +} + +static void invoke_and_clear_callbacks_on_abort(void) +{ + wlog_t *item; + struct tree_s *callbacks = STM_PSEGMENT->callbacks_on_abort; + if (tree_is_cleared(callbacks)) + return; + STM_PSEGMENT->callbacks_on_abort = tree_create(); + + TREE_LOOP_FORWARD(*callbacks, item) { + void *key = (void *)item->addr; + void (*callback)(void *) = (void(*)(void *))item->val; + assert(key != NULL); + assert(callback != NULL); + + /* The callback may call stm_call_on_abort(key, NULL). It is + ignored, because 'callbacks_on_abort' was cleared already. */ + callback(key); + + } TREE_LOOP_END; + + tree_free(callbacks); +} diff --git a/c7/stm/extra.h b/c7/stm/extra.h new file mode 100644 --- /dev/null +++ b/c7/stm/extra.h @@ -0,0 +1,3 @@ + +static void clear_callbacks_on_abort(void); +static void invoke_and_clear_callbacks_on_abort(void); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -59,6 +59,7 @@ pr->modified_old_objects = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); + pr->callbacks_on_abort = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); highest_overflow_number = pr->overflow_number; } @@ -96,6 +97,7 @@ list_free(pr->modified_old_objects); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); + tree_free(pr->callbacks_on_abort); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -10,6 +10,7 @@ #include "stm/largemalloc.h" #include "stm/nursery.h" #include "stm/contention.h" +#include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/misc.c" @@ -25,4 +26,5 @@ #include "stm/hash_id.c" #include "stm/core.c" #include "stm/contention.c" +#include "stm/extra.c" #include "stm/fprintcolor.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -277,6 +277,13 @@ otherwise. After a minor collection no object can move any more. */ long stm_can_move(object_t *); +/* If the current transaction aborts later, invoke 'callback(key)'. If + the current transaction commits, then the callback is forgotten. You + can only register one callback per key. You can call + 'stm_call_on_abort(key, NULL)' to cancel an existing callback. 
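A compact Python model of the stm_call_on_abort() contract implemented in extra.c above: callbacks are keyed, only one callback may be registered per key, passing NULL unregisters, the callbacks run once on abort (after the registry has already been cleared), and they are simply forgotten on commit. The real code keeps them in a per-segment tree; a dict stands in for it here, and all names are illustrative.

    class TransactionModel:
        def __init__(self):
            self.callbacks_on_abort = {}

        def call_on_abort(self, key, callback):
            if callback is None:
                self.callbacks_on_abort.pop(key, None)     # unregistering is always ok
            else:
                assert key not in self.callbacks_on_abort  # one callback per key
                self.callbacks_on_abort[key] = callback

        def commit(self):
            self.callbacks_on_abort.clear()                # forgotten on commit

        def abort(self):
            callbacks = self.callbacks_on_abort
            self.callbacks_on_abort = {}                   # cleared before invoking
            for key, callback in callbacks.items():
                callback(key)

    log = []
    t = TransactionModel()
    t.call_on_abort('p1', log.append)
    t.call_on_abort('p2', log.append)
    t.call_on_abort('p2', None)       # cancelled again, never runs
    t.abort()
    assert log == ['p1']

    t.call_on_abort('p3', log.append)
    t.commit()                        # dropped, not invoked
    assert log == ['p1']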
+ Note: 'key' must be aligned to a multiple of 8 bytes. */ +void stm_call_on_abort(void *key, void callback(void *)); + /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -81,6 +81,7 @@ void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); int stm_can_move(object_t *); +void stm_call_on_abort(void *key, void callback(void *)); """) diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py --- a/c7/test/test_extra.py +++ b/c7/test/test_extra.py @@ -1,6 +1,15 @@ from support import * import py +def ffi_new_aligned(string): + ALIGN = ffi.sizeof("void *") + p1 = ffi.new("void *[]", (len(string) + ALIGN) // ALIGN) + p2 = ffi.gc(ffi.cast("char *", p1), lambda p2: p1) + p2[0:len(string)+1] = string + '\x00' + assert ffi.string(p2) == string + return p2 + + class TestExtra(BaseTest): def test_clear_on_abort(self): @@ -17,3 +26,45 @@ assert p[2] == 'l' assert p[3] == 'l' assert p[4] == 'o' + + def test_call_on_abort(self): + p0 = ffi_new_aligned("aaa") + p1 = ffi_new_aligned("hello") + p2 = ffi_new_aligned("removed") + p3 = ffi_new_aligned("world") + # + @ffi.callback("void(void *)") + def clear_me(p): + p = ffi.cast("char *", p) + p[0] = chr(ord(p[0]) + 1) + # + self.start_transaction() + lib.stm_call_on_abort(p0, clear_me) + # the registered callbacks are removed on + # successful commit + self.commit_transaction() + assert ffi.string(p0) == "aaa" + # + self.start_transaction() + lib.stm_call_on_abort(p1, clear_me) + lib.stm_call_on_abort(p2, clear_me) + lib.stm_call_on_abort(p3, clear_me) + lib.stm_call_on_abort(p2, ffi.NULL) + assert ffi.string(p0) == "aaa" + assert ffi.string(p1) == "hello" + assert ffi.string(p2) == "removed" + assert ffi.string(p3) == "world" + self.abort_transaction() + # + assert ffi.string(p0) == "aaa" + assert ffi.string(p1) == "iello" + assert ffi.string(p2) == "removed" + assert ffi.string(p3) == "xorld" + # + # the registered callbacks are removed on abort + self.start_transaction() + self.abort_transaction() + assert ffi.string(p0) == "aaa" + assert ffi.string(p1) == "iello" + assert ffi.string(p2) == "removed" + assert ffi.string(p3) == "xorld" From noreply at buildbot.pypy.org Tue Mar 11 10:06:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:06:17 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/d912ca6f0d53 Message-ID: <20140311090617.5C57C1C314C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69852:9d5940288fd5 Date: 2014-03-11 10:05 +0100 http://bitbucket.org/pypy/pypy/changeset/9d5940288fd5/ Log: import stmgc/d912ca6f0d53 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -b4a037995423 +d912ca6f0d53 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -181,6 +181,7 @@ assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); + assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); @@ -379,6 +380,8 @@ STM_PSEGMENT->overflow_number_has_been_used 
= false; } + clear_callbacks_on_abort(); + /* send what is hopefully the correct signals */ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* wake up one thread in wait_for_end_of_inevitable_transaction() */ @@ -499,6 +502,14 @@ stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + /* clear memory registered on the thread-local */ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + if (tl->mem_clear_on_abort) + memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); + + /* invoke the callbacks */ + invoke_and_clear_callbacks_on_abort(); + if (STM_SEGMENT->nursery_end == NSE_SIGABORT) STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ @@ -538,6 +549,7 @@ wait_for_end_of_inevitable_transaction(true); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; + clear_callbacks_on_abort(); } s_mutex_unlock(); diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -106,6 +106,9 @@ next minor collection. */ struct tree_s *nursery_objects_shadows; + /* Tree of 'key->callback' associations from stm_call_on_abort() */ + struct tree_s *callbacks_on_abort; + /* Start time: to know approximately for how long a transaction has been running, in contention management */ uint64_t start_time; diff --git a/rpython/translator/stm/src_stm/stm/extra.c b/rpython/translator/stm/src_stm/stm/extra.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/extra.c @@ -0,0 +1,56 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +void stm_call_on_abort(void *key, void callback(void *)) +{ + assert(_running_transaction()); + + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + /* ignore callbacks if we're in an inevitable transaction + (which cannot abort) */ + return; + } + + if (callback == NULL) { + /* ignore the return value: unregistered keys can be + "deleted" again */ + tree_delete_item(STM_PSEGMENT->callbacks_on_abort, (uintptr_t)key); + } + else { + /* double-registering the same key will crash */ + tree_insert(STM_PSEGMENT->callbacks_on_abort, + (uintptr_t)key, (uintptr_t)callback); + } +} + +static void clear_callbacks_on_abort(void) +{ + if (!tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)) + tree_clear(STM_PSEGMENT->callbacks_on_abort); +} + +static void invoke_and_clear_callbacks_on_abort(void) +{ + wlog_t *item; + struct tree_s *callbacks = STM_PSEGMENT->callbacks_on_abort; + if (tree_is_cleared(callbacks)) + return; + STM_PSEGMENT->callbacks_on_abort = tree_create(); + + TREE_LOOP_FORWARD(*callbacks, item) { + void *key = (void *)item->addr; + void (*callback)(void *) = (void(*)(void *))item->val; + assert(key != NULL); + assert(callback != NULL); + + /* The callback may call stm_call_on_abort(key, NULL). It is + ignored, because 'callbacks_on_abort' was cleared already. 
*/ + callback(key); + + } TREE_LOOP_END; + + tree_free(callbacks); +} diff --git a/rpython/translator/stm/src_stm/stm/extra.h b/rpython/translator/stm/src_stm/stm/extra.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/extra.h @@ -0,0 +1,4 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ + +static void clear_callbacks_on_abort(void); +static void invoke_and_clear_callbacks_on_abort(void); diff --git a/rpython/translator/stm/src_stm/stm/hash_id.c b/rpython/translator/stm/src_stm/stm/hash_id.c --- a/rpython/translator/stm/src_stm/stm/hash_id.c +++ b/rpython/translator/stm/src_stm/stm/hash_id.c @@ -56,7 +56,7 @@ return id_or_identityhash(obj, true); } -void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash) +void stm_set_prebuilt_identityhash(object_t *obj, long hash) { struct object_s *realobj = (struct object_s *) REAL_ADDRESS(stm_object_pages, obj); diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -60,6 +60,7 @@ pr->modified_old_objects = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); + pr->callbacks_on_abort = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); highest_overflow_number = pr->overflow_number; } @@ -97,6 +98,7 @@ list_free(pr->modified_old_objects); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); + tree_free(pr->callbacks_on_abort); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -11,6 +11,7 @@ #include "stm/largemalloc.h" #include "stm/nursery.h" #include "stm/contention.h" +#include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/misc.c" @@ -26,4 +27,5 @@ #include "stm/hash_id.c" #include "stm/core.c" #include "stm/contention.c" +#include "stm/extra.c" #include "stm/fprintcolor.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -54,6 +54,10 @@ object_t **shadowstack, **shadowstack_base; /* a generic optional thread-local object */ object_t *thread_local_obj; + /* in case this thread runs a transaction that aborts, + the following raw region of memory is cleared. */ + char *mem_clear_on_abort; + size_t mem_bytes_to_clear_on_abort; /* the next fields are handled automatically by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -268,12 +272,19 @@ controlled for each prebuilt object individually. (Useful uor PyPy) */ long stm_identityhash(object_t *obj); long stm_id(object_t *obj); -void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); +void stm_set_prebuilt_identityhash(object_t *obj, long hash); /* Returns 1 if the object can still move (it's in the nursery), or 0 otherwise. After a minor collection no object can move any more. */ long stm_can_move(object_t *); +/* If the current transaction aborts later, invoke 'callback(key)'. If + the current transaction commits, then the callback is forgotten. You + can only register one callback per key. You can call + 'stm_call_on_abort(key, NULL)' to cancel an existing callback. + Note: 'key' must be aligned to a multiple of 8 bytes. 
*/ +void stm_call_on_abort(void *key, void callback(void *)); + /* ==================== END ==================== */ From noreply at buildbot.pypy.org Tue Mar 11 10:15:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:15:54 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: tweak tweak Message-ID: <20140311091554.50B201C314C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69853:a7fa5589af70 Date: 2014-03-11 10:15 +0100 http://bitbucket.org/pypy/pypy/changeset/a7fa5589af70/ Log: tweak tweak diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -21,11 +21,11 @@ [s_gc, s_typeid16, annmodel.SomeInteger(nonneg=True), s_gcref], s_gcref) # - def pypy_stmcb_size(obj): + def pypy_stmcb_size_rounded_up(obj): return gc.get_size(obj) - pypy_stmcb_size.c_name = "pypy_stmcb_size" + pypy_stmcb_size_rounded_up.c_name = "pypy_stmcb_size_rounded_up" self.autoregister_ptrs.append( - getfn(pypy_stmcb_size, [llannotation.SomeAddress()], + getfn(pypy_stmcb_size_rounded_up, [llannotation.SomeAddress()], annmodel.SomeInteger())) # def invokecallback(root, visit_fn): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -44,7 +44,10 @@ def stm_register_thread_local(funcgen, op): - return 'stm_register_thread_local(&stm_thread_local);' + return ( + 'stm_register_thread_local(&stm_thread_local);\n\t' + 'stm_thread_local.mem_clear_on_abort = &pypy_g_ExcData;\n\t' + 'stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData);') def stm_unregister_thread_local(funcgen, op): return 'stm_unregister_thread_local(&stm_thread_local);' diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -4,11 +4,11 @@ __thread struct stm_thread_local_s stm_thread_local; -extern Signed pypy_stmcb_size(void*); +extern Signed pypy_stmcb_size_rounded_up(void*); extern void pypy_stmcb_trace(void*, void(*)(void*)); -inline size_t stmcb_size(struct object_s *obj) { - return pypy_stmcb_size(obj); +inline ssize_t stmcb_size_rounded_up(struct object_s *obj) { + return pypy_stmcb_size_rounded_up(obj); } inline void stmcb_trace(struct object_s *obj, void visit(object_t **)) { From noreply at buildbot.pypy.org Tue Mar 11 10:28:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:28:13 +0100 (CET) Subject: [pypy-commit] stmgc default: PyPy calls stm_call_on_abort() even outside transactions. Message-ID: <20140311092813.5C5BE1C32D3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r973:6718bfcdc402 Date: 2014-03-11 10:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/6718bfcdc402/ Log: PyPy calls stm_call_on_abort() even outside transactions. diff --git a/c7/stm/extra.c b/c7/stm/extra.c --- a/c7/stm/extra.c +++ b/c7/stm/extra.c @@ -3,9 +3,14 @@ #endif -void stm_call_on_abort(void *key, void callback(void *)) +void stm_call_on_abort(stm_thread_local_t *tl, + void *key, void callback(void *)) { - assert(_running_transaction()); + if (!_stm_in_transaction(tl)) { + /* check that the current thread-local is really running a + transaction, and do nothing otherwise. 
*/ + return; + } if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* ignore callbacks if we're in an inevitable transaction diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -282,7 +282,7 @@ can only register one callback per key. You can call 'stm_call_on_abort(key, NULL)' to cancel an existing callback. Note: 'key' must be aligned to a multiple of 8 bytes. */ -void stm_call_on_abort(void *key, void callback(void *)); +void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -81,7 +81,7 @@ void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); int stm_can_move(object_t *); -void stm_call_on_abort(void *key, void callback(void *)); +void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); """) diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py --- a/c7/test/test_extra.py +++ b/c7/test/test_extra.py @@ -39,17 +39,17 @@ p[0] = chr(ord(p[0]) + 1) # self.start_transaction() - lib.stm_call_on_abort(p0, clear_me) + lib.stm_call_on_abort(self.get_stm_thread_local(), p0, clear_me) # the registered callbacks are removed on # successful commit self.commit_transaction() assert ffi.string(p0) == "aaa" # self.start_transaction() - lib.stm_call_on_abort(p1, clear_me) - lib.stm_call_on_abort(p2, clear_me) - lib.stm_call_on_abort(p3, clear_me) - lib.stm_call_on_abort(p2, ffi.NULL) + lib.stm_call_on_abort(self.get_stm_thread_local(), p1, clear_me) + lib.stm_call_on_abort(self.get_stm_thread_local(), p2, clear_me) + lib.stm_call_on_abort(self.get_stm_thread_local(), p3, clear_me) + lib.stm_call_on_abort(self.get_stm_thread_local(), p2, ffi.NULL) assert ffi.string(p0) == "aaa" assert ffi.string(p1) == "hello" assert ffi.string(p2) == "removed" @@ -68,3 +68,15 @@ assert ffi.string(p1) == "iello" assert ffi.string(p2) == "removed" assert ffi.string(p3) == "xorld" + + def test_ignores_if_outside_transaction(self): + @ffi.callback("void(void *)") + def dont_see_me(p): + seen.append(p) + # + seen = [] + p0 = ffi_new_aligned("aaa") + lib.stm_call_on_abort(self.get_stm_thread_local(), p0, dont_see_me) + self.start_transaction() + self.abort_transaction() + assert seen == [] From noreply at buildbot.pypy.org Tue Mar 11 10:41:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:41:59 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/6718bfcdc402 Message-ID: <20140311094159.713ED1C0686@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69854:0ba9edd00a2b Date: 2014-03-11 10:28 +0100 http://bitbucket.org/pypy/pypy/changeset/0ba9edd00a2b/ Log: import stmgc/6718bfcdc402 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -d912ca6f0d53 +6718bfcdc402 diff --git a/rpython/translator/stm/src_stm/stm/extra.c b/rpython/translator/stm/src_stm/stm/extra.c --- a/rpython/translator/stm/src_stm/stm/extra.c +++ b/rpython/translator/stm/src_stm/stm/extra.c @@ -4,9 +4,14 @@ #endif -void stm_call_on_abort(void *key, void callback(void *)) +void stm_call_on_abort(stm_thread_local_t *tl, + void *key, void callback(void *)) { - assert(_running_transaction()); + if (!_stm_in_transaction(tl)) { + /* check that the current thread-local is really running a + transaction, and do 
nothing otherwise. */ + return; + } if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* ignore callbacks if we're in an inevitable transaction diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -283,7 +283,7 @@ can only register one callback per key. You can call 'stm_call_on_abort(key, NULL)' to cancel an existing callback. Note: 'key' must be aligned to a multiple of 8 bytes. */ -void stm_call_on_abort(void *key, void callback(void *)); +void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); /* ==================== END ==================== */ From noreply at buildbot.pypy.org Tue Mar 11 10:42:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:42:00 +0100 (CET) Subject: [pypy-commit] pypy default: DEBUGFLAGS contains -O1, so it will override this pointless -O0 here Message-ID: <20140311094200.AB6631C0686@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69855:88e5cdb50ff9 Date: 2014-03-11 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/88e5cdb50ff9/ Log: DEBUGFLAGS contains -O1, so it will override this pointless -O0 here diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -405,7 +405,7 @@ ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), - ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), + ('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), ] if self.has_profopt(): From noreply at buildbot.pypy.org Tue Mar 11 10:50:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:50:49 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: tweaks Message-ID: <20140311095049.261D41C0686@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69856:304495dc8009 Date: 2014-03-11 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/304495dc8009/ Log: tweaks diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -819,12 +819,13 @@ # generate the start-up code and put it into a function print >> f, 'char *RPython_StartupCode(void) {' print >> f, '\tchar *error = NULL;' - for line in database.gcpolicy.gc_startup_code(): - print >> f,"\t" + line if database.with_stm: print >> f, '\tpypy_stm_setup();' + for line in database.gcpolicy.gc_startup_code(): + print >> f,"\t" + line + # put float infinities in global constants, we should not have so many of them for now to make # a table+loop preferable for dest, value in database.late_initializations: diff --git a/rpython/translator/c/src/dtoa.c b/rpython/translator/c/src/dtoa.c --- a/rpython/translator/c/src/dtoa.c +++ b/rpython/translator/c/src/dtoa.c @@ -2969,7 +2969,8 @@ result = __Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve); _PyPy_SET_53BIT_PRECISION_END; #ifdef RPY_STM - stm_call_on_abort(result, (void(*)(void *))_PyPy_dg_freedtoa); + stm_call_on_abort(&stm_thread_local, result, + (void(*)(void 
*))_PyPy_dg_freedtoa); #endif return result; } @@ -2977,7 +2978,7 @@ void _PyPy_dg_freedtoa(char *s) { #ifdef RPY_STM - stm_call_on_abort(s, NULL); + stm_call_on_abort(&stm_thread_local, s, NULL); #endif __Py_dg_freedtoa(s); } diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -13,8 +13,9 @@ #ifdef RPY_STM void _pypy_stm_free(void *); -#define _OP_RAW_MALLOCED(r) stm_call_on_abort(r, _pypy_stm_free) -#define _OP_RAW_STM_UNREGISTER(r) stm_call_on_abort(r, NULL) +#define _OP_RAW_MALLOCED(r) stm_call_on_abort(&stm_thread_local, r, \ + _pypy_stm_free) +#define _OP_RAW_STM_UNREGISTER(r) stm_call_on_abort(&stm_thread_local, r, NULL) #else #define _OP_RAW_MALLOCED(r) /* nothing */ #define _OP_RAW_STM_UNREGISTER(r) /* nothing */ From noreply at buildbot.pypy.org Tue Mar 11 10:50:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 10:50:50 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: DEBUGFLAGS contains -O1, so it will override this pointless -O0 here Message-ID: <20140311095050.6F8901C0686@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69857:73c222ddbca5 Date: 2014-03-11 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/73c222ddbca5/ Log: DEBUGFLAGS contains -O1, so it will override this pointless -O0 here diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -457,7 +457,7 @@ ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT -DRPY_STM_ASSERT" debug_target'), - ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT -DRPY_STM_ASSERT" debug_target'), + ('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DRPY_ASSERT -DRPY_LL_ASSERT -DRPY_STM_ASSERT" debug_target'), ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), ] if self.has_profopt(): From noreply at buildbot.pypy.org Tue Mar 11 18:11:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 18:11:14 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Write a minimal "readbarrier.py". Message-ID: <20140311171114.85E161C0128@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69868:1783f387a615 Date: 2014-03-11 18:01 +0100 http://bitbucket.org/pypy/pypy/changeset/1783f387a615/ Log: Write a minimal "readbarrier.py". diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -53,6 +53,7 @@ hop.genop("stm_pop_root_into", [var]) def transform_generic_set(self, hop): + # XXX detect if we're inside a 'stm_ignored' block and... do what? 
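A toy model of the read-barrier insertion performed by the new readbarrier.py shown below: before every read operation on a GC pointer, an extra 'stm_read' on the same object is emitted, so that any read executed inside a transaction is guaranteed to be covered by a read or write barrier. The GC-pointer type check and the real flow-graph types are omitted here for brevity; operations are plain (opname, args) pairs.

    READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load'])

    def insert_read_barriers(operations):
        newops = []
        for opname, args in operations:
            if opname in READ_OPS:                        # reads of GC objects...
                newops.append(('stm_read', args[:1]))     # ...get a barrier first
            newops.append((opname, args))
        return newops

    ops = [('getfield', ['p0', 'foo']), ('int_add', ['i1', 'i2'])]
    assert insert_read_barriers(ops) == [
        ('stm_read', ['p0']),
        ('getfield', ['p0', 'foo']),
        ('int_add', ['i1', 'i2']),
    ]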
assert self.write_barrier_ptr == "stm" opname = hop.spaceop.opname v_struct = hop.spaceop.args[0] diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ b/rpython/translator/stm/breakfinder.py @@ -1,17 +1,21 @@ from rpython.translator.backendopt import graphanalyze +from rpython.translator.stm import funcgen TRANSACTION_BREAK = set([ 'stm_commit_transaction', - 'stm_begin_inevitable_transaction', - 'stm_perform_transaction', - 'stm_partial_commit_and_resume_other_threads', # new priv_revision - 'jit_assembler_call', - 'jit_stm_transaction_break_point', + 'stm_start_inevitable_transaction', + #'stm_perform_transaction', + #'stm_partial_commit_and_resume_other_threads', # new priv_revision + #'jit_assembler_call', + #'jit_stm_transaction_break_point', 'stm_enter_callback_call', 'stm_leave_callback_call', ]) +for tb in TRANSACTION_BREAK: + assert hasattr(funcgen, tb) + class TransactionBreakAnalyzer(graphanalyze.BoolGraphAnalyzer): diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/readbarrier.py @@ -0,0 +1,31 @@ +from rpython.flowspace.model import SpaceOperation +from rpython.translator.unsimplify import varoftype +from rpython.rtyper.lltypesystem import lltype + + +READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load']) + + +def is_gc_ptr(T): + return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' + + +def insert_stm_read_barrier(transformer, graph): + # We need to put enough 'stm_read' in the graph so that any + # execution of a READ_OP on some GC object is guaranteed to also + # execute either 'stm_read' or 'stm_write' on the same GC object + # during the same transaction. + # + # XXX this can be optimized a lot, but for now we go with the + # simplest possible solution... 
+ # + for block in graph.iterblocks(): + if not block.operations: + continue + newops = [] + for op in block.operations: + if op.opname in READ_OPS and is_gc_ptr(op.args[0].concretetype): + v_none = varoftype(lltype.Void) + newops.append(SpaceOperation('stm_read', [op.args[0]], v_none)) + newops.append(op) + block.operations = newops diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/test/test_readbarrier.py @@ -0,0 +1,26 @@ +from rpython.translator.stm.test.transform_support import BaseTestTransform +from rpython.rtyper.lltypesystem import lltype + + +class TestReadBarrier(BaseTestTransform): + do_read_barrier = True + + def test_simple_read(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 42 + x2 = lltype.malloc(X, immortal=True) + x2.foo = 81 + + def f1(n): + if n > 1: + return x2.foo + else: + return x1.foo + + res = self.interpret(f1, [4]) + assert res == 81 + assert self.read_barriers == [x2] + res = self.interpret(f1, [-5]) + assert res == 42 + assert self.read_barriers == [x1] diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -2,7 +2,6 @@ from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache from rpython.translator.stm.transform import STMTransformer -from rpython.translator.stm.writebarrier import needs_barrier from rpython.conftest import option @@ -21,13 +20,12 @@ class BaseTestTransform(object): - do_write_barrier = False + do_read_barrier = False do_turn_inevitable = False do_jit_driver = False def build_state(self): - self.writemode = set() - self.barriers = [] + self.read_barriers = [] def get_category_or_null(self, p): if isinstance(p, _stmptr): @@ -41,7 +39,8 @@ def interpret(self, fn, args, gcremovetypeptr=False, run=True): self.build_state() clear_tcache() - interp, self.graph = get_interpreter(fn, args, view=False) + interp, self.graph = get_interpreter(fn, args, view=False, + viewbefore=False) interp.tester = self interp.frame_class = LLSTMFrame # @@ -50,8 +49,8 @@ self.stmtransformer = STMTransformer(self.translator) if self.do_jit_driver: self.stmtransformer.transform_jit_driver() - if self.do_write_barrier: - self.stmtransformer.transform_write_barrier() + if self.do_read_barrier: + self.stmtransformer.transform_read_barrier() if self.do_turn_inevitable: self.stmtransformer.transform_turn_inevitable() if option.view: @@ -68,64 +67,39 @@ class LLSTMFrame(LLFrame): stm_ignored = False + def eval(self): + self.gcptrs_actually_read = [] + result = LLFrame.eval(self) + for x in self.gcptrs_actually_read: + assert x in self.llinterpreter.tester.read_barriers + return result + def all_stm_ptrs(self): for frame in self.llinterpreter.frame_stack: for value in frame.bindings.values(): if isinstance(value, _stmptr): yield value - def get_category_or_null(self, p): - return self.llinterpreter.tester.get_category_or_null(p) + def op_stm_read(self, obj): + self.llinterpreter.tester.read_barriers.append(obj) - def check_category(self, p, expected): - cat = self.get_category_or_null(p) - assert cat is None or cat in 'AIQRVW' - if expected is not None: - if self.stm_ignored: - if expected >= 'W': - raise AssertionError("should not be seen in 'stm_ignored'") - if 
expected > 'I': - expected = 'I' - assert cat is not None and cat >= expected - return cat - - def op_stm_barrier(self, kind, obj): - frm, middledigit, to = kind - assert middledigit == '2' - cat = self.check_category(obj, frm) - if not needs_barrier(cat, to): - # a barrier, but with no effect - self.llinterpreter.tester.barriers.append(kind.lower()) - return obj - else: - # a barrier, calling a helper - ptr2 = _stmptr(obj, to) - if to >= 'V': - self.llinterpreter.tester.writemode.add(ptr2._obj) - self.llinterpreter.tester.barriers.append(kind) - return ptr2 + def op_stm_write(self, obj): + self.op_stm_read(obj) # implicitly counts as a read barrier too def op_stm_ignored_start(self): + xxx assert self.stm_ignored == False self.stm_ignored = True def op_stm_ignored_stop(self): + xxx assert self.stm_ignored == True self.stm_ignored = False - def op_stm_ptr_eq(self, obj1, obj2): - self.check_category(obj1, None) - self.check_category(obj2, None) - self.llinterpreter.tester.barriers.append('=') - return obj1 == obj2 - def op_getfield(self, obj, field): if obj._TYPE.TO._gckind == 'gc': if obj._TYPE.TO._immutable_field(field): - expected = 'I' - else: - expected = 'R' - self.check_category(obj, expected) + self.gcptrs_actually_read.append(obj) return LLFrame.op_getfield(self, obj, field) def op_setfield(self, obj, fieldname, fieldvalue): diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -1,4 +1,5 @@ from rpython.translator.stm.inevitable import insert_turn_inevitable +from rpython.translator.stm.readbarrier import insert_stm_read_barrier from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.stm.threadlocalref import transform_tlref from rpython.translator.c.support import log @@ -13,6 +14,7 @@ assert not hasattr(self.translator, 'stm_transformation_applied') self.start_log() self.transform_jit_driver() + self.transform_read_barrier() self.transform_turn_inevitable() self.print_logs() self.translator.stm_transformation_applied = True @@ -21,6 +23,12 @@ self.transform_threadlocalref() self.print_logs_after_gc() + def transform_read_barrier(self): + self.read_barrier_counts = 0 + for graph in self.translator.graphs: + insert_stm_read_barrier(self, graph) + log("%d read barriers inserted" % (self.read_barrier_counts,)) + def transform_turn_inevitable(self): for graph in self.translator.graphs: insert_turn_inevitable(graph) From noreply at buildbot.pypy.org Tue Mar 11 18:11:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 18:11:16 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Insert the stm_read() barriers at the correct time Message-ID: <20140311171116.21D621C0128@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69869:de414b61f2d7 Date: 2014-03-11 18:10 +0100 http://bitbucket.org/pypy/pypy/changeset/de414b61f2d7/ Log: Insert the stm_read() barriers at the correct time diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -413,6 +413,7 @@ # must be no gc-var access afterwards anyway) 'stm_register_thread_local': LLOp(), 'stm_unregister_thread_local': LLOp(), + 'stm_read': LLOp(), 'stm_write': LLOp(), 'stm_can_move': LLOp(), 'stm_allocate_tid': LLOp(sideeffects=False, canmallocgc=True), diff --git a/rpython/translator/c/genc.py 
b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -160,9 +160,6 @@ sandbox=self.config.translation.sandbox) self.db = db - if self.config.translation.stm: - stmtransformer.transform_after_gc() - # give the gc a chance to register interest in the start-up functions it # need (we call this for its side-effects of db.get()) list(db.gcpolicy.gc_startup_code()) @@ -187,6 +184,9 @@ exports.clear() db.complete() + if self.config.translation.stm: + stmtransformer.transform_after_gc() + self.collect_compilation_info(db) return db diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -52,6 +52,12 @@ def stm_unregister_thread_local(funcgen, op): return 'stm_unregister_thread_local(&stm_thread_local);' +def stm_read(funcgen, op): + assert isinstance(op.args[0].concretetype, lltype.Ptr) + assert op.args[0].concretetype.TO._gckind == 'gc' + arg0 = funcgen.expr(op.args[0]) + return 'stm_read((object_t *)%s);' % (arg0,) + def stm_write(funcgen, op): assert isinstance(op.args[0].concretetype, lltype.Ptr) assert op.args[0].concretetype.TO._gckind == 'gc' diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -27,5 +27,6 @@ if op.opname in READ_OPS and is_gc_ptr(op.args[0].concretetype): v_none = varoftype(lltype.Void) newops.append(SpaceOperation('stm_read', [op.args[0]], v_none)) + transformer.read_barrier_counts += 1 newops.append(op) block.operations = newops diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -12,22 +12,23 @@ def transform(self): assert not hasattr(self.translator, 'stm_transformation_applied') - self.start_log() + self.start_log(1) self.transform_jit_driver() - self.transform_read_barrier() self.transform_turn_inevitable() - self.print_logs() + self.print_logs(1) self.translator.stm_transformation_applied = True def transform_after_gc(self): + self.start_log(2) self.transform_threadlocalref() - self.print_logs_after_gc() + self.transform_read_barrier() + self.print_logs(2) def transform_read_barrier(self): self.read_barrier_counts = 0 for graph in self.translator.graphs: insert_stm_read_barrier(self, graph) - log("%d read barriers inserted" % (self.read_barrier_counts,)) + log.info("%d read barriers inserted" % (self.read_barrier_counts,)) def transform_turn_inevitable(self): for graph in self.translator.graphs: @@ -38,13 +39,13 @@ reorganize_around_jit_driver(self, graph) def transform_threadlocalref(self): + return #XXX XXX XXX transform_tlref(self.translator) - def start_log(self): - log.info("Software Transactional Memory transformation") + def start_log(self, step): + log.info("Software Transactional Memory transformation, step %d" + % step) - def print_logs(self): - log.info("Software Transactional Memory transformation applied") - - def print_logs_after_gc(self): - log.info("Software Transactional Memory transformation-after-gc done") + def print_logs(self, step): + log.info("Software Transactional Memory transformation, step %d, " + "applied" % step) From noreply at buildbot.pypy.org Tue Mar 11 18:17:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 18:17:02 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: For now, ignore destructors with stm 
Message-ID: <20140311171702.F362E1C0128@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69870:92e19792822d Date: 2014-03-11 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/92e19792822d/ Log: For now, ignore destructors with stm diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1237,6 +1237,8 @@ super(TransformerLayoutBuilder, self).__init__(GCClass, lltype2vtable) def has_finalizer(self, TYPE): + if self.translator.config.translation.stm: + return False # XXX no finalizer support for now rtti = get_rtti(TYPE) return rtti is not None and getattr(rtti._obj, 'destructor_funcptr', None) From noreply at buildbot.pypy.org Tue Mar 11 19:06:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Mar 2014 19:06:14 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Redo thread-local objects Message-ID: <20140311180614.8063D1C0320@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69871:a59d46c8f5c6 Date: 2014-03-11 19:04 +0100 http://bitbucket.org/pypy/pypy/changeset/a59d46c8f5c6/ Log: Redo thread-local objects diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -433,6 +433,11 @@ 'stm_leave_callback_call':LLOp(), 'stm_should_break_transaction': LLOp(sideeffects=False), 'stm_set_transaction_length': LLOp(), + 'stm_threadlocalref_get': LLOp(sideeffects=False), + 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, + # see threadlocalref.py + 'stm_threadlocal_get': LLOp(sideeffects=False), + 'stm_threadlocal_set': LLOp(), ## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_become_inevitable': LLOp(canmallocgc=True), diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -160,6 +160,9 @@ sandbox=self.config.translation.sandbox) self.db = db + if self.config.translation.stm: + stmtransformer.transform_after_gc() + # give the gc a chance to register interest in the start-up functions it # need (we call this for its side-effects of db.get()) list(db.gcpolicy.gc_startup_code()) @@ -185,7 +188,7 @@ db.complete() if self.config.translation.stm: - stmtransformer.transform_after_gc() + stmtransformer.transform_after_complete() self.collect_compilation_info(db) return db diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -158,6 +158,15 @@ arg0 = funcgen.expr(op.args[0]) return 'pypy_stm_set_transaction_length(%s);' % (arg0,) +def stm_threadlocal_get(funcgen, op): + result = funcgen.expr(op.result) + return '%s = (%s)stm_thread_local.thread_local_obj;' % ( + result, cdecl(funcgen.lltypename(op.result), '')) + +def stm_threadlocal_set(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + return 'stm_thread_local.thread_local_obj = (object_t *)%s;' % (arg0,) + ##def stm_initialize(funcgen, op): ## return '''stm_initialize(); diff --git a/rpython/translator/stm/threadlocalref.py b/rpython/translator/stm/threadlocalref.py --- a/rpython/translator/stm/threadlocalref.py +++ b/rpython/translator/stm/threadlocalref.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper import 
llannotation from rpython.rtyper import annlowlevel from rpython.rtyper.lltypesystem import lltype, rclass from rpython.rtyper.lltypesystem.lloperation import llop @@ -28,7 +29,10 @@ if not array: return lltype.nullptr(rclass.OBJECTPTR.TO) else: - array = llop.stm_barrier(lltype.Ptr(ARRAY), 'A2R', array) + llop.stm_read(lltype.Void, array) + # ^^^ might not actually be needed, because this array is + # only ever seen from the current transaction; but better + # safe than sorry return array[index] # def ll_threadlocalref_set(index, newvalue): @@ -37,15 +41,18 @@ array = lltype.malloc(ARRAY, total) # llop may allocate! llop.stm_threadlocal_set(lltype.Void, array) else: - array = llop.stm_barrier(lltype.Ptr(ARRAY), 'A2W', array) - # invalidating other barriers after an llop.threadlocalref_set - # is not necessary since no other variable should contain - # a reference to stm_threadlocal_obj + llop.stm_write(lltype.Void, array) + # ^^^ might not actually be needed, because this array is + # only ever seen from the current transaction; but better + # safe than sorry + # invalidating other barriers after an llop.threadlocalref_set + # is not necessary since no other variable should contain + # a reference to stm_threadlocal_obj array[index] = newvalue # annhelper = annlowlevel.MixLevelHelperAnnotator(t.rtyper) s_Int = annmodel.SomeInteger() - s_Ptr = annmodel.SomePtr(rclass.OBJECTPTR) + s_Ptr = llannotation.SomePtr(rclass.OBJECTPTR) c_getter_ptr = annhelper.constfunc(ll_threadlocalref_get, [s_Int], s_Ptr) c_setter_ptr = annhelper.constfunc(ll_threadlocalref_set, diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -21,8 +21,12 @@ def transform_after_gc(self): self.start_log(2) self.transform_threadlocalref() + self.print_logs(2) + + def transform_after_complete(self): + self.start_log(3) self.transform_read_barrier() - self.print_logs(2) + self.print_logs(3) def transform_read_barrier(self): self.read_barrier_counts = 0 @@ -39,7 +43,6 @@ reorganize_around_jit_driver(self, graph) def transform_threadlocalref(self): - return #XXX XXX XXX transform_tlref(self.translator) def start_log(self, step): From noreply at buildbot.pypy.org Tue Mar 11 21:24:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 11 Mar 2014 21:24:11 +0100 (CET) Subject: [pypy-commit] pypy default: use modern py3k compat. syntax when we can: byte literals, except exc as, Message-ID: <20140311202411.7EE6D1C0128@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69872:76e1b1d43ca4 Date: 2014-03-11 13:08 -0700 http://bitbucket.org/pypy/pypy/changeset/76e1b1d43ca4/ Log: use modern py3k compat. 
syntax when we can: byte literals, except exc as, str(exc) and sys.maxsize vs sys.maxint diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -489,24 +489,24 @@ def test_compile_error_message(self): import re compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - compile('\xef\xbb\xbf\n', 'dummy', 'exec') - compile('\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec') + compile(b'\xef\xbb\xbf\n', 'dummy', 'exec') + compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec') exc = raises(SyntaxError, compile, - '# -*- coding: fake -*-\n', 'dummy', 'exec') - assert 'fake' in exc.value[0] + b'# -*- coding: fake -*-\n', 'dummy', 'exec') + assert 'fake' in str(exc.value) exc = raises(SyntaxError, compile, - '\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - assert 'iso-8859-15' in exc.value[0] - assert 'BOM' in exc.value[0] + b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') + assert 'iso-8859-15' in str(exc.value), str(exc.value) + assert 'BOM' in str(exc.value) exc = raises(SyntaxError, compile, - '\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') - assert 'fake' in exc.value[0] - assert 'BOM' in exc.value[0] + b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') + assert 'fake' in str(exc.value) + assert 'BOM' in str(exc.value) def test_unicode_compile(self): try: compile(u'-', '?', 'eval') - except SyntaxError, e: + except SyntaxError as e: assert e.lineno == 1 def test_unicode_encoding_compile(self): diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -216,7 +216,7 @@ def test_flush_error_on_close(self): import _io - txt = _io.TextIOWrapper(_io.BytesIO(""), encoding="ascii") + txt = _io.TextIOWrapper(_io.BytesIO(b""), encoding="ascii") def bad_flush(): raise IOError() txt.flush = bad_flush diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -36,15 +36,15 @@ raises(TypeError, fcntl.fcntl, "foo") raises(TypeError, fcntl.fcntl, f, "foo") exc = raises(TypeError, fcntl.fcntl, F("foo"), 1) - assert exc.value[0] == 'fileno() returned a non-integer' + assert str(exc.value) == 'fileno() returned a non-integer' exc = raises(ValueError, fcntl.fcntl, 2147483647 + 1, 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' exc = raises(ValueError, fcntl.fcntl, F(2147483647 + 1), 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' exc = raises(ValueError, fcntl.fcntl, -2147483648 - 1, 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' exc = raises(ValueError, fcntl.fcntl, F(-2147483648 - 1), 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' raises(ValueError, fcntl.fcntl, -1, 1, 0) raises(ValueError, fcntl.fcntl, F(-1), 1, 0) raises(ValueError, fcntl.fcntl, F(long(-1)), 1, 0) diff --git 
a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -843,6 +843,6 @@ m.close() assert False, "should not have been able to mmap empty file" except ValueError as e: - assert e.message == "cannot mmap an empty file" + assert str(e) == "cannot mmap an empty file" except BaseException as e: assert False, "unexpected exception: " + str(e) diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -24,7 +24,7 @@ assert type(pw.pw_gid) is int raises(TypeError, pwd.getpwuid) raises(TypeError, pwd.getpwuid, 3.14) - raises(KeyError, pwd.getpwuid, sys.maxint) + raises(KeyError, pwd.getpwuid, sys.maxsize) # -1 is allowed, cast to uid_t exc = raises(KeyError, pwd.getpwuid, -1) m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -219,15 +219,15 @@ pollster = select.poll() pollster.register(1) exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 - assert exc.value[0] == 'signed short integer is greater than maximum' + assert str(exc.value) == 'signed short integer is greater than maximum' exc = raises(OverflowError, pollster.register, 0, -32768 - 1) - assert exc.value[0] == 'signed short integer is less than minimum' + assert str(exc.value) == 'signed short integer is less than minimum' raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 raises(OverflowError, pollster.poll, -2147483648 - 1) raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 exc = raises(TypeError, pollster.poll, '123') - assert exc.value[0] == 'timeout must be an integer or None' + assert str(exc.value) == 'timeout must be an integer or None' class AppTestSelectWithPipes(_AppTestSelect): @@ -277,7 +277,7 @@ pollster.unregister(fd) pollster.register(w, select.POLLOUT) exc = raises(RuntimeError, pollster.poll) - assert exc.value[0] == 'concurrent poll() invocation' + assert str(exc.value) == 'concurrent poll() invocation' finally: # and make the call to poll() from the thread return os.write(w, b'spam') diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -166,9 +166,9 @@ raises(ValueError, zlib.decompressobj().flush, -1) raises(TypeError, zlib.decompressobj().flush, None) raises(OverflowError, zlib.decompressobj().flush, 2**31) - raises(ValueError, zlib.decompressobj().decompress, 'abc', -1) - raises(TypeError, zlib.decompressobj().decompress, 'abc', None) - raises(OverflowError, zlib.decompressobj().decompress, 'abc', 2**31) + raises(ValueError, zlib.decompressobj().decompress, b'abc', -1) + raises(TypeError, zlib.decompressobj().decompress, b'abc', None) + raises(OverflowError, zlib.decompressobj().decompress, b'abc', 2**31) raises(TypeError, self.zlib.decompress, self.compressed, None) raises(OverflowError, self.zlib.decompress, self.compressed, 2**31) @@ -177,21 +177,21 @@ co = zlib.compressobj(zlib.Z_BEST_COMPRESSION) assert co.flush() # Returns a zlib header dco = zlib.decompressobj() - assert dco.flush() == "" + assert dco.flush() == b"" def test_decompress_incomplete_stream(self): import zlib # This is 
'foo', deflated - x = 'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' + x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # For the record - assert zlib.decompress(x) == 'foo' + assert zlib.decompress(x) == b'foo' raises(zlib.error, zlib.decompress, x[:-5]) # Omitting the stream end works with decompressor objects # (see issue #8672). dco = zlib.decompressobj() y = dco.decompress(x[:-5]) y += dco.flush() - assert y == 'foo' + assert y == b'foo' def test_unused_data(self): """ @@ -256,13 +256,13 @@ def test_flush_with_freed_input(self): # Issue #16411: decompressor accesses input to last decompress() call # in flush(), even if this object has been freed in the meanwhile. - input1 = 'abcdefghijklmnopqrstuvwxyz' - input2 = 'QWERTYUIOPASDFGHJKLZXCVBNM' + input1 = b'abcdefghijklmnopqrstuvwxyz' + input2 = b'QWERTYUIOPASDFGHJKLZXCVBNM' data = self.zlib.compress(input1) dco = self.zlib.decompressobj() dco.decompress(data, 1) del data data = self.zlib.compress(input2) assert dco.flush(1) == input1[1:] - assert dco.unused_data == '' - assert dco.unconsumed_tail == '' + assert dco.unused_data == b'' + assert dco.unconsumed_tail == b'' From noreply at buildbot.pypy.org Tue Mar 11 21:24:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 11 Mar 2014 21:24:13 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: merge default Message-ID: <20140311202413.8D9921C0128@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69873:41b16b2d6913 Date: 2014-03-11 13:21 -0700 http://bitbucket.org/pypy/pypy/changeset/41b16b2d6913/ Log: merge default diff --git a/lib-python/2.7/test/test_zipfile.py b/lib-python/2.7/test/test_zipfile.py --- a/lib-python/2.7/test/test_zipfile.py +++ b/lib-python/2.7/test/test_zipfile.py @@ -19,7 +19,7 @@ from unittest import skipUnless from test.test_support import TESTFN, TESTFN_UNICODE, TESTFN_ENCODING, \ - run_unittest, findfile, unlink + run_unittest, findfile, unlink, rmtree try: TESTFN_UNICODE.encode(TESTFN_ENCODING) except (UnicodeError, TypeError): @@ -365,7 +365,8 @@ produces the expected result.""" with zipfile.ZipFile(TESTFN2, "w") as zipfp: zipfp.write(TESTFN) - self.assertEqual(zipfp.read(TESTFN), open(TESTFN).read()) + with open(TESTFN,'r') as fid: + self.assertEqual(zipfp.read(TESTFN), fid.read()) @skipUnless(zlib, "requires zlib") def test_per_file_compression(self): @@ -404,11 +405,12 @@ self.assertEqual(writtenfile, correctfile) # make sure correct data is in correct file - self.assertEqual(fdata, open(writtenfile, "rb").read()) + with open(writtenfile, "rb") as fid: + self.assertEqual(fdata, fid.read()) os.remove(writtenfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def test_extract_all(self): with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp: @@ -420,11 +422,12 @@ for fpath, fdata in SMALL_TEST_DATA: outfile = os.path.join(os.getcwd(), fpath) - self.assertEqual(fdata, open(outfile, "rb").read()) + with open(outfile, "rb") as fid: + self.assertEqual(fdata, fid.read()) os.remove(outfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def check_file(self, filename, content): self.assertTrue(os.path.isfile(filename)) @@ -509,12 +512,12 @@ self.assertEqual(writtenfile, correctfile, msg="extract %r" % arcname) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') with 
zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall(targetpath) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') correctfile = os.path.join(os.getcwd(), *fixedname.split('/')) @@ -523,12 +526,12 @@ self.assertEqual(writtenfile, correctfile, msg="extract %r" % arcname) self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) with zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall() self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) os.remove(TESTFN2) @@ -773,11 +776,12 @@ self.assertNotIn('mod2.txt', names) finally: - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) def test_write_non_pyfile(self): with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp: - open(TESTFN, 'w').write('most definitely not a python file') + with open(TESTFN, 'w') as fid: + fid.write('most definitely not a python file') self.assertRaises(RuntimeError, zipfp.writepy, TESTFN) os.remove(TESTFN) @@ -940,8 +944,9 @@ self.assertRaises(RuntimeError, zipf.open, "foo.txt") self.assertRaises(RuntimeError, zipf.testzip) self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus") - open(TESTFN, 'w').write('zipfile test data') - self.assertRaises(RuntimeError, zipf.write, TESTFN) + with open(TESTFN, 'w') as fid: + fid.write('zipfile test data') + self.assertRaises(RuntimeError, zipf.write, TESTFN) def test_bad_constructor_mode(self): """Check that bad modes passed to ZipFile constructor are caught.""" @@ -1126,6 +1131,7 @@ pass try: zipf = zipfile.ZipFile(TESTFN, mode="r") + zipf.close() except zipfile.BadZipfile: self.fail("Unable to create empty ZIP file in 'w' mode") @@ -1133,6 +1139,7 @@ pass try: zipf = zipfile.ZipFile(TESTFN, mode="r") + zipf.close() except: self.fail("Unable to create empty ZIP file in 'a' mode") @@ -1329,12 +1336,11 @@ # Verify that (when the ZipFile is in control of creating file objects) # multiple open() calls can be made without interfering with each other. 
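# [Editor's note, not part of the archived message] The test_zipfile edits in
# this merge switch to "with" blocks so member handles are closed
# deterministically instead of lingering until the GC runs (which matters on
# PyPy, where there is no reference counting).  A minimal standalone
# illustration of the pattern; the in-memory archive and member name are made up.
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("ones", b"1" * 1000)

with zipfile.ZipFile(buf, "r") as zf:
    with zf.open("ones") as member:
        data = member.read()
assert data == b"1" * 1000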
with zipfile.ZipFile(TESTFN2, mode="r") as zipf: - zopen1 = zipf.open('ones') - zopen2 = zipf.open('ones') - data1 = zopen1.read(500) - data2 = zopen2.read(500) - data1 += zopen1.read(500) - data2 += zopen2.read(500) + with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2: + data1 = zopen1.read(500) + data2 = zopen2.read(500) + data1 += zopen1.read(500) + data2 += zopen2.read(500) self.assertEqual(data1, data2) def test_different_file(self): @@ -1369,8 +1375,8 @@ zipf.read('ones') with zipf.open('ones') as zopen1: pass - for x in range(10): - self.assertLess(open('/dev/null').fileno(), 100) + with open(os.devnull) as f: + self.assertLess(f.fileno(), 100) def tearDown(self): unlink(TESTFN2) @@ -1394,12 +1400,12 @@ def test_store_dir(self): os.mkdir(os.path.join(TESTFN2, "x")) - zipf = zipfile.ZipFile(TESTFN, "w") - zipf.write(os.path.join(TESTFN2, "x"), "x") - self.assertTrue(zipf.filelist[0].filename.endswith("x/")) + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.write(os.path.join(TESTFN2, "x"), "x") + self.assertTrue(zipf.filelist[0].filename.endswith("x/")) def tearDown(self): - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) if os.path.exists(TESTFN): unlink(TESTFN) @@ -1413,7 +1419,8 @@ for n, s in enumerate(self.seps): self.arcdata[s] = s.join(self.line_gen) + s self.arcfiles[s] = '%s-%d' % (TESTFN, n) - open(self.arcfiles[s], "wb").write(self.arcdata[s]) + with open(self.arcfiles[s], "wb") as fid: + fid.write(self.arcdata[s]) def make_test_archive(self, f, compression): # Create the ZIP archive @@ -1482,8 +1489,9 @@ # Read the ZIP archive with zipfile.ZipFile(f, "r") as zipfp: for sep, fn in self.arcfiles.items(): - for line, zipline in zip(self.line_gen, zipfp.open(fn, "rU")): - self.assertEqual(zipline, line + '\n') + with zipfp.open(fn, "rU") as fid: + for line, zipline in zip(self.line_gen, fid): + self.assertEqual(zipline, line + '\n') def test_read_stored(self): for f in (TESTFN2, TemporaryFile(), StringIO()): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,17 +328,16 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisvalue = args.pop(0) + thisvalue = args[0] thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, args, kwargs)) - args.insert(0, thisvalue) + self._convert_args(argtypes, args[1:], kwargs)) newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None keepalives, newargs, argtypes, outargs, errcheckargs = ( - self._convert_args(argtypes, args, kwargs)) + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -489,24 +489,24 @@ def test_compile_error_message(self): import re compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - compile('\xef\xbb\xbf\n', 'dummy', 'exec') - compile('\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec') + compile(b'\xef\xbb\xbf\n', 'dummy', 'exec') + compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec') exc = raises(SyntaxError, compile, - '# -*- coding: fake -*-\n', 'dummy', 'exec') - assert 'fake' in exc.value[0] + b'# -*- coding: fake -*-\n', 'dummy', 'exec') + 
assert 'fake' in str(exc.value) exc = raises(SyntaxError, compile, - '\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - assert 'iso-8859-15' in exc.value[0] - assert 'BOM' in exc.value[0] + b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') + assert 'iso-8859-15' in str(exc.value), str(exc.value) + assert 'BOM' in str(exc.value) exc = raises(SyntaxError, compile, - '\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') - assert 'fake' in exc.value[0] - assert 'BOM' in exc.value[0] + b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') + assert 'fake' in str(exc.value) + assert 'BOM' in str(exc.value) def test_unicode_compile(self): try: compile(u'-', '?', 'eval') - except SyntaxError, e: + except SyntaxError as e: assert e.lineno == 1 def test_unicode_encoding_compile(self): diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -1,7 +1,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rstring import UnicodeBuilder -from rpython.rlib.runicode import UNICHR +from rpython.rlib.runicode import code_to_unichr, MAXUNICODE from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault @@ -247,9 +247,15 @@ builder = UnicodeBuilder() pos = start while pos < end: - ch = obj[pos] + code = ord(obj[pos]) + if (MAXUNICODE == 0xffff and 0xD800 <= code <= 0xDBFF and + pos + 1 < end and 0xDC00 <= ord(obj[pos+1]) <= 0xDFFF): + code = (code & 0x03FF) << 10 + code |= ord(obj[pos+1]) & 0x03FF + code += 0x10000 + pos += 1 builder.append(u"&#") - builder.append(unicode(str(ord(ch)))) + builder.append(unicode(str(code))) builder.append(u";") pos += 1 return space.newtuple([space.wrap(builder.build()), w_end]) @@ -658,7 +664,7 @@ if not 0 <= x <= 0x10FFFF: raise oefmt(space.w_TypeError, "character mapping must be in range(0x110000)") - return UNICHR(x) + return code_to_unichr(x) elif space.is_w(w_ch, space.w_None): # Charmap may return None return errorchar diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -339,7 +339,7 @@ def test_flush_error_on_close(self): import _io - txt = _io.TextIOWrapper(_io.BytesIO(""), encoding="ascii") + txt = _io.TextIOWrapper(_io.BytesIO(b""), encoding="ascii") def bad_flush(): raise IOError() txt.flush = bad_flush diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -449,6 +449,11 @@ def build_exported_objects(): # Standard exceptions + # PyExc_BaseException, PyExc_Exception, PyExc_ValueError, PyExc_KeyError, + # PyExc_IndexError, PyExc_IOError, PyExc_OSError, PyExc_TypeError, + # PyExc_AttributeError, PyExc_OverflowError, PyExc_ImportError, + # PyExc_NameError, PyExc_MemoryError, PyExc_RuntimeError, + # PyExc_UnicodeEncodeError, PyExc_UnicodeDecodeError, ... 
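# [Editor's note, not part of the archived message] The _codecs/interp_codecs
# change earlier in this merge makes the xmlcharrefreplace error handler fold a
# UTF-16 surrogate pair back into a single code point on narrow-unicode builds
# before emitting the &#NNN; reference.  The same arithmetic in isolation, with
# illustrative example values:
def combine_surrogates(hi, lo):
    assert 0xD800 <= hi <= 0xDBFF and 0xDC00 <= lo <= 0xDFFF
    return (((hi & 0x03FF) << 10) | (lo & 0x03FF)) + 0x10000

code = combine_surrogates(0xD83D, 0xDE00)
assert code == 0x1F600
assert "&#%d;" % code == "&#128512;"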
for exc_name in exceptions.Module.interpleveldefs.keys(): GLOBALS['PyExc_' + exc_name] = ( 'PyTypeObject*', @@ -456,39 +461,40 @@ # Common types with their own struct for cpyname, pypyexpr in { - "Type": "space.w_type", - "String": "space.w_str", - "Unicode": "space.w_unicode", - "Dict": "space.w_dict", - "Tuple": "space.w_tuple", - "List": "space.w_list", - "Set": "space.w_set", - "FrozenSet": "space.w_frozenset", - "Int": "space.w_int", - "Bool": "space.w_bool", - "Float": "space.w_float", - "Long": "space.w_int", - "Complex": "space.w_complex", - "ByteArray": "space.w_bytearray", - "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", - "Array": "space.gettypeobject(W_NDimArray.typedef)", - "BaseObject": "space.w_object", - 'None': 'space.type(space.w_None)', - 'NotImplemented': 'space.type(space.w_NotImplemented)', - 'Cell': 'space.gettypeobject(Cell.typedef)', - 'Module': 'space.gettypeobject(Module.typedef)', - 'Property': 'space.gettypeobject(W_Property.typedef)', - 'Slice': 'space.gettypeobject(W_SliceObject.typedef)', - 'StaticMethod': 'space.gettypeobject(StaticMethod.typedef)', - 'CFunction': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', - 'WrapperDescr': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)', - 'InstanceMethod': 'space.gettypeobject(cpyext.classobject.InstanceMethod.typedef)', + "PyType_Type": "space.w_type", + "PyString_Type": "space.w_str", + "PyUnicode_Type": "space.w_unicode", + "PyDict_Type": "space.w_dict", + "PyTuple_Type": "space.w_tuple", + "PyList_Type": "space.w_list", + "PySet_Type": "space.w_set", + "PyFrozenSet_Type": "space.w_frozenset", + "PyInt_Type": "space.w_int", + "PyBool_Type": "space.w_bool", + "PyFloat_Type": "space.w_float", + "PyLong_Type": "space.w_int", + "PyComplex_Type": "space.w_complex", + "PyByteArray_Type": "space.w_bytearray", + "PyMemoryView_Type": "space.gettypeobject(W_MemoryView.typedef)", + "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", + "PyBaseObject_Type": "space.w_object", + 'PyNone_Type': 'space.type(space.w_None)', + 'PyNotImplemented_Type': 'space.type(space.w_NotImplemented)', + 'PyCell_Type': 'space.gettypeobject(Cell.typedef)', + 'PyModule_Type': 'space.gettypeobject(Module.typedef)', + 'PyProperty_Type': 'space.gettypeobject(W_Property.typedef)', + 'PySlice_Type': 'space.gettypeobject(W_SliceObject.typedef)', + 'PyStaticMethod_Type': 'space.gettypeobject(StaticMethod.typedef)', + 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', + 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' + 'PyInstanceMethod_Type': 'space.gettypeobject(cpyext.classobject.InstanceMethod.typedef)', }.items(): - GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) + GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) - for cpyname in 'Method List Long Dict Tuple'.split(): - FORWARD_DECLS.append('typedef struct { PyObject_HEAD } ' - 'Py%sObject' % (cpyname, )) + for cpyname in '''PyMethodObject PyListObject PyLongObject + PyDictObject PyTupleObject'''.split(): + FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' + % (cpyname, )) build_exported_objects() def get_structtype_for_ctype(ctype): diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/foo3.c @@ -0,0 +1,73 @@ +#include +#include + +PyObject* foo3type_tp_new(PyTypeObject* metatype, PyObject* args, PyObject* kwds) +{ + printf("in 
foo3type_tp_new, preprocessing...\n"); + PyObject* newType = PyType_Type.tp_new(metatype, args, kwds); + printf("in foo3type_tp_new, postprocessing...\n"); + return newType; +} + +PyTypeObject Foo3Type_Type = { + PyVarObject_HEAD_INIT(0, 0) + /*tp_name*/ "Foo3.Type", + /*tp_basicsize*/ sizeof(PyTypeObject), + /*tp_itemsize*/ 0, + /*tp_dealloc*/ 0, + /*tp_print*/ 0, + /*tp_getattr*/ 0, + /*tp_setattr*/ 0, + /*tp_compare*/ 0, + /*tp_repr*/ 0, + /*tp_as_number*/ 0, + /*tp_as_sequence*/ 0, + /*tp_as_mapping*/ 0, + /*tp_hash*/ 0, + /*tp_call*/ 0, + /*tp_str*/ 0, + /*tp_getattro*/ 0, + /*tp_setattro*/ 0, + /*tp_as_buffer*/ 0, + /*tp_flags*/ Py_TPFLAGS_DEFAULT, + /*tp_doc*/ 0, + /*tp_traverse*/ 0, + /*tp_clear*/ 0, + /*tp_richcompare*/ 0, + /*tp_weaklistoffset*/ 0, + /*tp_iter*/ 0, + /*tp_iternext*/ 0, + /*tp_methods*/ 0, + /*tp_members*/ 0, + /*tp_getset*/ 0, + /*tp_base*/ 0, // set to &PyType_Type in module init function (why can it not be done here?) + /*tp_dict*/ 0, + /*tp_descr_get*/ 0, + /*tp_descr_set*/ 0, + /*tp_dictoffset*/ 0, + /*tp_init*/ 0, + /*tp_alloc*/ 0, + /*tp_new*/ foo3type_tp_new, + /*tp_free*/ 0, + /*tp_is_gc*/ 0, + /*tp_bases*/ 0, + /*tp_mro*/ 0, + /*tp_cache*/ 0, + /*tp_subclasses*/ 0, + /*tp_weaklist*/ 0 +}; + +static PyMethodDef sbkMethods[] = {{NULL, NULL, 0, NULL}}; + +#ifdef _WIN32 + __declspec(dllexport) void // PyModINIT_FUNC is broken on PyPy/Windows +#else + PyMODINIT_FUNC +#endif +initfoo3(void) +{ + PyObject* mod = Py_InitModule("Foo3", sbkMethods); + Foo3Type_Type.tp_base = &PyType_Type; + PyType_Ready(&Foo3Type_Type); + PyModule_AddObject(mod, "Type", (PyObject*)&Foo3Type_Type); +} diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -580,3 +580,9 @@ assert bool(module.newInt(1)) assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + + def test_tp_new_in_subclass_of_type(self): + skip("BROKEN") + module = self.import_module(name='foo3') + print 'calling module.Type()...' 
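# [Editor's note, not part of the archived message] foo3.c above defines a
# metatype whose tp_new simply delegates to PyType_Type.tp_new, and the test
# (currently skipped as BROKEN) drives it as module.Type("X", (object,), {}).
# The pure-Python equivalent of that pattern, for comparison:
class Type(type):
    def __new__(mcls, name, bases, namespace):
        # pre-/post-processing hooks would go where foo3.c has its printf calls
        return type.__new__(mcls, name, bases, namespace)

X = Type("X", (object,), {})
assert isinstance(X, Type) and X.__name__ == "X"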
+ module.Type("X", (object,), {}) diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -36,15 +36,15 @@ raises(TypeError, fcntl.fcntl, "foo") raises(TypeError, fcntl.fcntl, f, "foo") exc = raises(TypeError, fcntl.fcntl, F("foo"), 1) - assert exc.value[0] == 'fileno() returned a non-integer' + assert str(exc.value) == 'fileno() returned a non-integer' exc = raises(ValueError, fcntl.fcntl, 2147483647 + 1, 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' exc = raises(ValueError, fcntl.fcntl, F(2147483647 + 1), 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' exc = raises(ValueError, fcntl.fcntl, -2147483648 - 1, 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' exc = raises(ValueError, fcntl.fcntl, F(-2147483648 - 1), 1, 0) - assert exc.value[0] == 'file descriptor cannot be a negative integer (-1)' + assert str(exc.value) == 'file descriptor cannot be a negative integer (-1)' raises(ValueError, fcntl.fcntl, -1, 1, 0) raises(ValueError, fcntl.fcntl, F(-1), 1, 0) raises(ValueError, fcntl.fcntl, F(int(-1)), 1, 0) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import oefmt from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import widen diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1738,7 +1738,10 @@ self._store(storage, i, offset, box, width) class UnicodeType(FlexibleType): - T = lltype.UniChar + T = lltype.Char + + def get_element_size(self): + return 4 # always UTF-32 @jit.unroll_safe def coerce(self, space, dtype, w_item): diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -837,6 +837,6 @@ m.close() assert False, "should not have been able to mmap empty file" except ValueError as e: - assert e.message == "cannot mmap an empty file" + assert str(e) == "cannot mmap an empty file" except BaseException as e: assert False, "unexpected exception: " + str(e) diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -24,7 +24,7 @@ assert type(pw.pw_gid) is int raises(TypeError, pwd.getpwuid) raises(TypeError, pwd.getpwuid, 3.14) - raises(KeyError, pwd.getpwuid, sys.maxint) + raises(KeyError, pwd.getpwuid, sys.maxsize) # -1 is allowed, cast to uid_t exc = raises(KeyError, pwd.getpwuid, -1) m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -356,14 +356,16 @@ seconds = pytime.time() else: seconds = space.float_w(w_seconds) - try: - seconds = 
ovfcheck_float_to_int(seconds) - t = rffi.r_time_t(seconds) - if rffi.cast(lltype.Signed, t) != seconds: - raise OverflowError - except OverflowError: + # + t = rffi.cast(rffi.TIME_T, seconds) + # + # Logic from CPython: How much info did we lose? We assume that + # time_t is an integral type. If we lost a second or more, the + # input doesn't fit in a time_t; call it an error. + diff = seconds - rffi.cast(lltype.Float, t) + if diff <= -1.0 or diff >= 1.0: raise OperationError(space.w_ValueError, - space.wrap("time argument too large")) + space.wrap("timestamp out of range for platform time_t")) return t def _tm_to_tuple(space, t): diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -68,6 +68,8 @@ assert 0 <= (t1 - t0) < 1.2 t = rctime.time() assert rctime.gmtime(t) == rctime.gmtime(t) + raises(ValueError, rctime.gmtime, 2**64) + raises(ValueError, rctime.gmtime, -2**64) def test_localtime(self): import time as rctime diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -219,15 +219,15 @@ pollster = select.poll() pollster.register(1) exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 - assert exc.value[0] == 'signed short integer is greater than maximum' + assert str(exc.value) == 'signed short integer is greater than maximum' exc = raises(OverflowError, pollster.register, 0, -32768 - 1) - assert exc.value[0] == 'signed short integer is less than minimum' + assert str(exc.value) == 'signed short integer is less than minimum' raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 raises(OverflowError, pollster.poll, -2147483648 - 1) raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 exc = raises(TypeError, pollster.poll, '123') - assert exc.value[0] == 'timeout must be an integer or None' + assert str(exc.value) == 'timeout must be an integer or None' class AppTestSelectWithPipes(_AppTestSelect): @@ -277,7 +277,7 @@ pollster.unregister(fd) pollster.register(w, select.POLLOUT) exc = raises(RuntimeError, pollster.poll) - assert exc.value[0] == 'concurrent poll() invocation' + assert str(exc.value) == 'concurrent poll() invocation' finally: # and make the call to poll() from the thread return os.write(w, b'spam') diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -36,11 +36,13 @@ return self._value lib_m = 'm' +has_sinf = True if sys.platform == 'win32': #there is a small chance this fails on Mingw via environ $CC import distutils.ccompiler if distutils.ccompiler.get_default_compiler() == 'msvc': lib_m = 'msvcrt' + has_sinf = False class TestFunction(object): Backend = CTypesBackend @@ -55,6 +57,8 @@ assert x == math.sin(1.23) def test_sinf(self): + if not has_sinf: + py.test.skip("sinf not available") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -236,8 +236,14 @@ return 42 con.set_authorizer(authorizer_cb) with 
pytest.raises(_sqlite3.OperationalError) as e: - con.execute('select 42') - assert str(e.value) == 'authorizer malfunction' + con.execute('select 123') + major, minor, micro = _sqlite3.sqlite_version.split('.')[:3] + if (int(major), int(minor), int(micro)) >= (3, 6, 14): + assert str(e.value) == 'authorizer malfunction' + else: + assert str(e.value) == \ + ("illegal return value (1) from the authorization function - " + "should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY") def test_issue1573(con): diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -153,9 +153,9 @@ raises(ValueError, zlib.decompressobj().flush, -1) raises(TypeError, zlib.decompressobj().flush, None) raises(OverflowError, zlib.decompressobj().flush, 2**31) - raises(ValueError, zlib.decompressobj().decompress, 'abc', -1) - raises(TypeError, zlib.decompressobj().decompress, 'abc', None) - raises(OverflowError, zlib.decompressobj().decompress, 'abc', 2**31) + raises(ValueError, zlib.decompressobj().decompress, b'abc', -1) + raises(TypeError, zlib.decompressobj().decompress, b'abc', None) + raises(OverflowError, zlib.decompressobj().decompress, b'abc', 2**31) raises(TypeError, self.zlib.decompress, self.compressed, None) raises(OverflowError, self.zlib.decompress, self.compressed, 2**31) @@ -164,21 +164,21 @@ co = zlib.compressobj(zlib.Z_BEST_COMPRESSION) assert co.flush() # Returns a zlib header dco = zlib.decompressobj() - assert dco.flush() == "" + assert dco.flush() == b"" def test_decompress_incomplete_stream(self): import zlib # This is 'foo', deflated - x = 'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' + x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # For the record - assert zlib.decompress(x) == 'foo' + assert zlib.decompress(x) == b'foo' raises(zlib.error, zlib.decompress, x[:-5]) # Omitting the stream end works with decompressor objects # (see issue #8672). dco = zlib.decompressobj() y = dco.decompress(x[:-5]) y += dco.flush() - assert y == 'foo' + assert y == b'foo' def test_unused_data(self): """ @@ -243,13 +243,13 @@ def test_flush_with_freed_input(self): # Issue #16411: decompressor accesses input to last decompress() call # in flush(), even if this object has been freed in the meanwhile. 
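# [Editor's note, not part of the archived message] A standalone sketch of the
# decompressobj() behaviour the test below relies on: with a max_length, the
# undelivered output stays buffered in the object and a later flush() returns
# the remainder.
import zlib

payload = b"abcdefghijklmnopqrstuvwxyz"
dco = zlib.decompressobj()
first = dco.decompress(zlib.compress(payload), 1)   # at most one byte out
assert len(first) <= 1
assert first + dco.flush() == payload               # flush() yields the rest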
- input1 = 'abcdefghijklmnopqrstuvwxyz' - input2 = 'QWERTYUIOPASDFGHJKLZXCVBNM' + input1 = b'abcdefghijklmnopqrstuvwxyz' + input2 = b'QWERTYUIOPASDFGHJKLZXCVBNM' data = self.zlib.compress(input1) dco = self.zlib.decompressobj() dco.decompress(data, 1) del data data = self.zlib.compress(input2) assert dco.flush(1) == input1[1:] - assert dco.unused_data == '' - assert dco.unconsumed_tail == '' + assert dco.unused_data == b'' + assert dco.unconsumed_tail == b'' diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -188,7 +188,8 @@ # this function is written directly in C; gcc will optimize it using SSE eci = ExternalCompilationInfo(post_include_bits=[""" static void pypy__decay_jit_counters(char *data, double f1, long size) { - struct { float times[5]; unsigned short subhashes[5]; } *p = data; + struct rpy_jitcnt { float times[5]; unsigned short subhashes[5]; }; + struct rpy_jitcnt *p = (struct rpy_jitcnt *)data; float f = (float)f1; long i; for (i=0; i Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69874:1480ca4330b0 Date: 2014-03-11 13:22 -0700 http://bitbucket.org/pypy/pypy/changeset/1480ca4330b0/ Log: adapt to py3 diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -435,19 +435,19 @@ def test_cpython_issue16029(self): import sys - M = min(sys.maxint, sys.maxsize) - x = xrange(0, M, M - 1) - assert x.__reduce__() == (xrange, (0, M, M - 1)) - x = xrange(0, -M, 1 - M) - assert x.__reduce__() == (xrange, (0, -M - 1, 1 - M)) + M = sys.maxsize + x = range(0, M, M - 1) + assert x.__reduce__() == (range, (0, M, M - 1)) + x = range(0, -M, 1 - M) + assert x.__reduce__() == (range, (0, -M, 1 - M)) def test_cpython_issue16030(self): import sys - M = min(sys.maxint, sys.maxsize) - x = xrange(0, M, M - 1) - assert repr(x) == 'xrange(0, %s, %s)' % (M, M - 1) - x = xrange(0, -M, 1 - M) - assert repr(x) == 'xrange(0, %s, %s)' % (-M - 1, 1 - M) + M = sys.maxsize + x = range(0, M, M - 1) + assert repr(x) == 'range(0, %s, %s)' % (M, M - 1), repr(x) + x = range(0, -M, 1 - M) + assert repr(x) == 'range(0, %s, %s)' % (-M, 1 - M), repr(x) class AppTestReversed: diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -272,7 +272,7 @@ # port can be None, int or string if space.is_w(w_port, space.w_None): port = None - elif space.isinstance_w(w_port, space.w_int) or space.isinstance_w(w_port, space.w_long): + elif space.isinstance_w(w_port, space.w_int): port = str(space.int_w(w_port)) elif space.isinstance_w(w_port, space.w_bytes): port = space.bytes_w(w_port) diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -548,7 +548,9 @@ try: w_value = maybe_int(space, w_value) except OperationError: - w_value = space.long(w_value) + raise oefmt(space.w_TypeError, + "%s format: a number is required, not %T", + fmt, w_value) try: value = space.int_w(w_value) return fmt % (value,) diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -184,14 +184,14 @@ x = MyInt(65) 
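# [Editor's note, not part of the archived message] The test_functional changes
# earlier in this message adapt the CPython issue 16029/16030 tests to the py3
# range() type; the same checks as a plain snippet runnable on CPython 3,
# outside the PyPy test harness:
import sys

M = sys.maxsize
r = range(0, M, M - 1)
assert r.__reduce__() == (range, (0, M, M - 1))
assert repr(r) == "range(0, %d, %d)" % (M, M - 1)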
assert '%c' % x == 'A' - def test_format_retry_with_long_if_int_fails(self): + def test_int_fails(self): class IntFails(object): def __int__(self): raise Exception - def __long__(self): - return 0L - assert "%x" % IntFails() == '0' + exc = raises(TypeError, "%x".__mod__, IntFails()) + expected = "%x format: a number is required, not IntFails" + assert str(exc.value) == expected def test_formatting_huge_precision(self): prec = 2**31 From noreply at buildbot.pypy.org Wed Mar 12 01:02:20 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 01:02:20 +0100 (CET) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140312000220.D2C4D1C027C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69875:1fa52d46dfdc Date: 2014-03-11 16:54 -0700 http://bitbucket.org/pypy/pypy/changeset/1fa52d46dfdc/ Log: py3k compat diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -208,14 +208,14 @@ prec = 2**31 format_string = "%.{}f".format(prec) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'prec too big' + assert str(exc.value) == 'prec too big' raises(OverflowError, lambda: u'%.*f' % (prec, 1. / 7)) def test_formatting_huge_width(self): import sys format_string = "%{}f".format(sys.maxsize + 1) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'width too big' + assert str(exc.value) == 'width too big' class AppTestWidthPrec: def test_width(self): @@ -341,11 +341,11 @@ prec = 2**31 format_string = u"%.{}f".format(prec) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'prec too big' + assert str(exc.value) == 'prec too big' raises(OverflowError, lambda: u'%.*f' % (prec, 1. / 7)) def test_formatting_huge_width(self): import sys format_string = u"%{}f".format(sys.maxsize + 1) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'width too big' + assert str(exc.value) == 'width too big' From noreply at buildbot.pypy.org Wed Mar 12 01:02:22 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 01:02:22 +0100 (CET) Subject: [pypy-commit] pypy py3k: readd support for u'' literals (pep 414). this doesn't land until CPython 3.3 Message-ID: <20140312000222.5A64A1C027C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69876:d3304f165e53 Date: 2014-03-11 16:44 -0700 http://bitbucket.org/pypy/pypy/changeset/d3304f165e53/ Log: readd support for u'' literals (pep 414). this doesn't land until CPython 3.3 but it may allow some code targetting 3.3 to run on our current 3.2 target. 
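# [Editor's note, not part of the archived message] PEP 414 re-admits the u''
# prefix as a no-op so 2/3 polyglot code keeps working.  On CPython 3.3+ (and,
# per this commit, on the py3k branch) the following holds, while the ur''
# combination stays rejected; the new parser test below checks the same
# rejection at the parsestring level.
s = u'hello\u0842 world'
assert s == 'hello\u0842 world'
assert u"x" == U"x" == "x"
try:
    compile("ur'foo'", "<example>", "eval")
except SyntaxError:
    pass
else:
    raise AssertionError("the ur'' prefix should be rejected")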
it also eases syncing test cases from the default branch diff --git a/pypy/interpreter/pyparser/dfa_generated.py b/pypy/interpreter/pyparser/dfa_generated.py --- a/pypy/interpreter/pyparser/dfa_generated.py +++ b/pypy/interpreter/pyparser/dfa_generated.py @@ -27,7 +27,7 @@ 'I': 1, 'J': 1, 'K': 1, 'L': 1, 'M': 1, 'N': 1, 'O': 1, 'P': 1, 'Q': 1, 'R': 3, 'S': 1, 'T': 1, - 'U': 1, 'V': 1, 'W': 1, 'X': 1, + 'U': 3, 'V': 1, 'W': 1, 'X': 1, 'Y': 1, 'Z': 1, '[': 14, '\\': 18, ']': 14, '^': 13, '_': 1, '`': 14, 'a': 1, 'b': 2, 'c': 1, 'd': 1, @@ -35,7 +35,7 @@ 'i': 1, 'j': 1, 'k': 1, 'l': 1, 'm': 1, 'n': 1, 'o': 1, 'p': 1, 'q': 1, 'r': 3, 's': 1, 't': 1, - 'u': 1, 'v': 1, 'w': 1, 'x': 1, + 'u': 3, 'v': 1, 'w': 1, 'x': 1, 'y': 1, 'z': 1, '{': 14, '|': 13, '}': 14, '~': 14, '\x80': 1}, # 1 diff --git a/pypy/interpreter/pyparser/gendfa.py b/pypy/interpreter/pyparser/gendfa.py --- a/pypy/interpreter/pyparser/gendfa.py +++ b/pypy/interpreter/pyparser/gendfa.py @@ -149,9 +149,11 @@ funny = group(states, operator, bracket, special) # ____________________________________________________________ def makeStrPrefix (): - return chain(states, - maybe(states, groupStr(states, "bB")), - maybe(states, groupStr(states, "rR"))) + return group(states, + chain(states, + maybe(states, groupStr(states, "bB")), + maybe(states, groupStr(states, "rR"))), + maybe(states, groupStr(states, "uU"))) # ____________________________________________________________ contStr = group(states, chain(states, diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -21,13 +21,18 @@ quote = s[ps] rawmode = False unicode_literal = True + saw_u = False # string decoration handling if quote == 'b' or quote == 'B': ps += 1 quote = s[ps] unicode_literal = False - if quote == 'r' or quote == 'R': + elif quote == 'u' or quote == 'U': + ps += 1 + quote = s[ps] + saw_u = True + if not saw_u and quote == 'r' or quote == 'R': ps += 1 quote = s[ps] rawmode = True diff --git a/pypy/interpreter/pyparser/pytokenize.py b/pypy/interpreter/pyparser/pytokenize.py --- a/pypy/interpreter/pyparser/pytokenize.py +++ b/pypy/interpreter/pyparser/pytokenize.py @@ -25,6 +25,8 @@ '"' : doubleDFA, 'r' : None, 'R' : None, + "u" : None, + "U" : None, 'b' : None, 'B' : None} @@ -33,6 +35,8 @@ prefix = uniPrefix + rawPrefix endDFAs[prefix + "'''"] = single3DFA endDFAs[prefix + '"""'] = double3DFA +endDFAs["u'''"] = single3DFA +endDFAs['U"""'] = double3DFA whiteSpaceStatesAccepts = [True] whiteSpaceStates = [{'\t': 0, ' ': 0, '\x0c': 0}] @@ -44,6 +48,7 @@ triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', + "u'''", 'u"""', "U'''", 'U"""', "b'''", 'b"""', "B'''", 'B"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""'): @@ -51,6 +56,7 @@ single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', + "u'", 'u"', "U'", 'U"', "b'", 'b"', "B'", 'B"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"'): diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -62,6 +62,19 @@ ret = space.unwrap(w_ret) assert ret == eval("# -*- coding: koi8-u -*-\nu'\x81'") + def test_unicode_pep414(self): + space = self.space + for s in [u'hello world', u'hello\n world']: + self.parse_and_compare(repr(s), unicode(s)) + + 
self.parse_and_compare("u'''hello\\x42 world'''", + u'hello\x42 world') + self.parse_and_compare("u'''hello\\u0842 world'''", + u'hello\u0842 world') + + space.raises_w(space.w_ValueError, + parsestring.parsestr, space, None, "ur'foo'") + def test_unicode_literals(self): space = self.space w_ret = parsestring.parsestr(space, None, repr("hello")) From noreply at buildbot.pypy.org Wed Mar 12 01:02:23 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 01:02:23 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: merge py3k Message-ID: <20140312000223.9A6281C02CF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69877:d9272670e477 Date: 2014-03-11 16:55 -0700 http://bitbucket.org/pypy/pypy/changeset/d9272670e477/ Log: merge py3k diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -333,7 +333,7 @@ Representation: (days, seconds, microseconds). Why? Because I felt like it. """ - __slots__ = '_days', '_seconds', '_microseconds' + __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0): @@ -426,14 +426,14 @@ assert isinstance(s, int) and 0 <= s < 24*3600 assert isinstance(us, int) and 0 <= us < 1000000 + if abs(d) > 999999999: + raise OverflowError("timedelta # of days is too large: %d" % d) + self = object.__new__(cls) - self._days = d self._seconds = s self._microseconds = us - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) - + self._hashcode = -1 return self def __repr__(self): @@ -617,7 +617,9 @@ return _cmp(self._getstate(), other._getstate()) def __hash__(self): - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode def __bool__(self): return (self._days != 0 or @@ -665,7 +667,7 @@ Properties (readonly): year, month, day """ - __slots__ = '_year', '_month', '_day' + __slots__ = '_year', '_month', '_day', '_hashcode' def __new__(cls, year, month=None, day=None): """Constructor. @@ -679,12 +681,14 @@ # Pickle support self = object.__new__(cls) self.__setstate(year) + self._hashcode = -1 return self year, month, day = _check_date_fields(year, month, day) self = object.__new__(cls) self._year = year self._month = month self._day = day + self._hashcode = -1 return self # Additional constructors @@ -847,7 +851,9 @@ def __hash__(self): "Hash." - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode # Computations @@ -1022,7 +1028,7 @@ Properties (readonly): hour, minute, second, microsecond, tzinfo """ - __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo' + __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode' def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Constructor. 
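# [Editor's note, not part of the archived message] The timedelta constructor
# shown earlier in this datetime.py diff normalises its state so that
# 0 <= seconds < 24*3600 and 0 <= microseconds < 1000000, pushing any sign
# into days; that is the invariant asserted just before the new _hashcode slot
# is initialised.  A quick illustration with the stdlib type:
from datetime import timedelta

d = timedelta(hours=-1)
assert (d.days, d.seconds, d.microseconds) == (-1, 82800, 0)
assert timedelta(microseconds=-1) == timedelta(days=-1, seconds=86399,
                                               microseconds=999999)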
@@ -1037,6 +1043,7 @@ # Pickle support self = object.__new__(cls) self.__setstate(hour, minute or None) + self._hashcode = -1 return self hour, minute, second, microsecond = _check_time_fields( hour, minute, second, microsecond) @@ -1047,6 +1054,7 @@ self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1142,16 +1150,20 @@ def __hash__(self): """Hash.""" - tzoff = self.utcoffset() - if not tzoff: # zero or None - return hash(self._getstate()[0]) - h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, - timedelta(hours=1)) - assert not m % timedelta(minutes=1), "whole minute" - m //= timedelta(minutes=1) - if 0 <= h < 24: - return hash(time(h, m, self.second, self.microsecond)) - return hash((h, m, self.second, self.microsecond)) + if self._hashcode == -1: + tzoff = self.utcoffset() + if not tzoff: # zero or None + self._hashcode = hash(self._getstate()[0]) + else: + h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, + timedelta(hours=1)) + assert not m % timedelta(minutes=1), "whole minute" + m //= timedelta(minutes=1) + if 0 <= h < 24: + self._hashcode = hash(time(h, m, self.second, self.microsecond)) + else: + self._hashcode = hash((h, m, self.second, self.microsecond)) + return self._hashcode # Conversion to string @@ -1292,12 +1304,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") self._hour, self._minute, self._second, us1, us2, us3 = string self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg") + self._tzinfo = tzinfo def __reduce__(self): return (time, self._getstate()) @@ -1320,8 +1331,9 @@ microsecond=0, tzinfo=None): if isinstance(year, bytes) and len(year) == 10 and 1 <= year[2] <= 12: # Pickle support - self = date.__new__(cls, year[:4]) + self = object.__new__(cls) self.__setstate(year, month) + self._hashcode = -1 return self year, month, day = _check_date_fields(year, month, day) hour, minute, second, microsecond = _check_time_fields( @@ -1336,6 +1348,7 @@ self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1736,12 +1749,15 @@ return base + otoff - myoff def __hash__(self): - tzoff = self.utcoffset() - if tzoff is None: - return hash(self._getstate()[0]) - days = _ymd2ord(self.year, self.month, self.day) - seconds = self.hour * 3600 + self.minute * 60 + self.second - return hash(timedelta(days, seconds, self.microsecond) - tzoff) + if self._hashcode == -1: + tzoff = self.utcoffset() + if tzoff is None: + self._hashcode = hash(self._getstate()[0]) + else: + days = _ymd2ord(self.year, self.month, self.day) + seconds = self.hour * 3600 + self.minute * 60 + self.second + self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff) + return self._hashcode # Pickle support. 
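# [Editor's note, not part of the archived message] The datetime.py hunks above
# all apply the same lazy hash-caching pattern: a _hashcode slot preset to -1
# and filled on the first __hash__() call (the built-in hash() never returns
# -1, so -1 is a safe "not computed yet" marker).  The pattern in isolation,
# with a made-up class name; the real code hashes the pickled state instead:
class _HashCached(object):
    __slots__ = ('_state', '_hashcode')

    def __init__(self, state):
        self._state = state
        self._hashcode = -1

    def __hash__(self):
        if self._hashcode == -1:
            self._hashcode = hash(self._state)   # computed at most once
        return self._hashcode

a, b = _HashCached((1, 2)), _HashCached((1, 2))
assert hash(a) == hash(b) == hash((1, 2))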
@@ -1758,14 +1774,13 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") (yhi, ylo, self._month, self._day, self._hour, self._minute, self._second, us1, us2, us3) = string self._year = yhi * 256 + ylo self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg") + self._tzinfo = tzinfo def __reduce__(self): return (self.__class__, self._getstate()) diff --git a/pypy/interpreter/pyparser/dfa_generated.py b/pypy/interpreter/pyparser/dfa_generated.py --- a/pypy/interpreter/pyparser/dfa_generated.py +++ b/pypy/interpreter/pyparser/dfa_generated.py @@ -27,7 +27,7 @@ 'I': 1, 'J': 1, 'K': 1, 'L': 1, 'M': 1, 'N': 1, 'O': 1, 'P': 1, 'Q': 1, 'R': 3, 'S': 1, 'T': 1, - 'U': 1, 'V': 1, 'W': 1, 'X': 1, + 'U': 3, 'V': 1, 'W': 1, 'X': 1, 'Y': 1, 'Z': 1, '[': 14, '\\': 18, ']': 14, '^': 13, '_': 1, '`': 14, 'a': 1, 'b': 2, 'c': 1, 'd': 1, @@ -35,7 +35,7 @@ 'i': 1, 'j': 1, 'k': 1, 'l': 1, 'm': 1, 'n': 1, 'o': 1, 'p': 1, 'q': 1, 'r': 3, 's': 1, 't': 1, - 'u': 1, 'v': 1, 'w': 1, 'x': 1, + 'u': 3, 'v': 1, 'w': 1, 'x': 1, 'y': 1, 'z': 1, '{': 14, '|': 13, '}': 14, '~': 14, '\x80': 1}, # 1 diff --git a/pypy/interpreter/pyparser/gendfa.py b/pypy/interpreter/pyparser/gendfa.py --- a/pypy/interpreter/pyparser/gendfa.py +++ b/pypy/interpreter/pyparser/gendfa.py @@ -149,9 +149,11 @@ funny = group(states, operator, bracket, special) # ____________________________________________________________ def makeStrPrefix (): - return chain(states, - maybe(states, groupStr(states, "bB")), - maybe(states, groupStr(states, "rR"))) + return group(states, + chain(states, + maybe(states, groupStr(states, "bB")), + maybe(states, groupStr(states, "rR"))), + maybe(states, groupStr(states, "uU"))) # ____________________________________________________________ contStr = group(states, chain(states, diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -22,13 +22,18 @@ quote = s[ps] rawmode = False unicode_literal = True + saw_u = False # string decoration handling if quote == 'b' or quote == 'B': ps += 1 quote = s[ps] unicode_literal = False - if quote == 'r' or quote == 'R': + elif quote == 'u' or quote == 'U': + ps += 1 + quote = s[ps] + saw_u = True + if not saw_u and quote == 'r' or quote == 'R': ps += 1 quote = s[ps] rawmode = True diff --git a/pypy/interpreter/pyparser/pytokenize.py b/pypy/interpreter/pyparser/pytokenize.py --- a/pypy/interpreter/pyparser/pytokenize.py +++ b/pypy/interpreter/pyparser/pytokenize.py @@ -25,6 +25,8 @@ '"' : doubleDFA, 'r' : None, 'R' : None, + "u" : None, + "U" : None, 'b' : None, 'B' : None} @@ -33,6 +35,8 @@ prefix = uniPrefix + rawPrefix endDFAs[prefix + "'''"] = single3DFA endDFAs[prefix + '"""'] = double3DFA +endDFAs["u'''"] = single3DFA +endDFAs['U"""'] = double3DFA whiteSpaceStatesAccepts = [True] whiteSpaceStates = [{'\t': 0, ' ': 0, '\x0c': 0}] @@ -44,6 +48,7 @@ triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', + "u'''", 'u"""', "U'''", 'U"""', "b'''", 'b"""', "B'''", 'B"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""'): @@ -51,6 +56,7 @@ single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', + "u'", 'u"', "U'", 'U"', "b'", 'b"', "B'", 'B"', "br'", 
'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"'): diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -62,6 +62,19 @@ ret = space.unwrap(w_ret) assert ret == eval("# -*- coding: koi8-u -*-\nu'\x81'") + def test_unicode_pep414(self): + space = self.space + for s in [u'hello world', u'hello\n world']: + self.parse_and_compare(repr(s), unicode(s)) + + self.parse_and_compare("u'''hello\\x42 world'''", + u'hello\x42 world') + self.parse_and_compare("u'''hello\\u0842 world'''", + u'hello\u0842 world') + + space.raises_w(space.w_ValueError, + parsestring.parsestr, space, None, "ur'foo'") + def test_unicode_literals(self): space = self.space w_ret = parsestring.parsestr(space, None, repr("hello")) From noreply at buildbot.pypy.org Wed Mar 12 01:02:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 01:02:24 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: merge default Message-ID: <20140312000224.DFE561C02F2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69878:5ffb127e4fc8 Date: 2014-03-11 16:56 -0700 http://bitbucket.org/pypy/pypy/changeset/5ffb127e4fc8/ Log: merge default diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -197,14 +197,14 @@ prec = 2**31 format_string = "%.{}f".format(prec) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'prec too big' + assert str(exc.value) == 'prec too big' raises(OverflowError, lambda: u'%.*f' % (prec, 1. / 7)) def test_formatting_huge_width(self): import sys format_string = "%{}f".format(sys.maxsize + 1) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'width too big' + assert str(exc.value) == 'width too big' class AppTestWidthPrec: def test_width(self): @@ -317,14 +317,14 @@ prec = 2**31 format_string = u"%.{}f".format(prec) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'prec too big' + assert str(exc.value) == 'prec too big' raises(OverflowError, lambda: u'%.*f' % (prec, 1. 
/ 7)) def test_formatting_huge_width(self): import sys format_string = u"%{}f".format(sys.maxsize + 1) exc = raises(ValueError, "format_string % 2.34") - assert exc.value[0] == 'width too big' + assert str(exc.value) == 'width too big' def test_ascii(self): assert "<%a>" % "test" == "<'test'>" From noreply at buildbot.pypy.org Wed Mar 12 01:52:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 12 Mar 2014 01:52:23 +0100 (CET) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140312005223.077121C0128@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69879:1e5259694b26 Date: 2014-03-11 20:51 -0400 http://bitbucket.org/pypy/pypy/changeset/1e5259694b26/ Log: py3k compat diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -496,7 +496,7 @@ assert 'fake' in str(exc.value) exc = raises(SyntaxError, compile, b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - assert 'iso-8859-15' in str(exc.value), str(exc.value) + assert 'iso-8859-15' in str(exc.value) assert 'BOM' in str(exc.value) exc = raises(SyntaxError, compile, b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -109,7 +109,7 @@ assert charmap_decode('xxx\xff', 'strict', map) == (u'xxx\xff', 4) exc = raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 'a'}) - assert exc.value[0] == "character mapping must return integer, None or unicode" + assert str(exc.value) == "character mapping must return integer, None or unicode" raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 0x110000}) assert (charmap_decode("\x00\x01\x02", "strict", {0: 0x10FFFF, 1: ord('b'), 2: ord('c')}) == @@ -615,9 +615,9 @@ import codecs exc = raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: 300}) - assert exc.value[0] == 'character mapping must be in range(256)' + assert str(exc.value) == 'character mapping must be in range(256)' exc = raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: u'a'}) - assert exc.value[0] == 'character mapping must return integer, None or str' + assert str(exc.value) == 'character mapping must return integer, None or str' raises(UnicodeError, codecs.charmap_encode, u"\xff", "replace", {0xff: None}) def test_charmap_encode_replace(self): diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -27,18 +27,18 @@ raises(KeyError, pwd.getpwuid, sys.maxsize) # -1 is allowed, cast to uid_t exc = raises(KeyError, pwd.getpwuid, -1) - m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) - assert m, exc.value[0] + m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value.args[0]) + assert m, exc.value.args[0] maxval = int(m.group(1)) assert maxval >= 2**32 - 1 # shouldn't overflow exc = raises(KeyError, pwd.getpwuid, maxval) - m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) - assert m, exc.value[0] + m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value.args[0]) + assert m, exc.value.args[0] # should be out of uid_t range for v in [-2, maxval+1, 2**128, -2**128]: exc = raises(KeyError, pwd.getpwuid, v) - assert exc.value[0] == 'getpwuid(): uid 
not found' + assert exc.value.args[0] == 'getpwuid(): uid not found' def test_getpwnam(self): import pwd From noreply at buildbot.pypy.org Wed Mar 12 03:40:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 12 Mar 2014 03:40:34 +0100 (CET) Subject: [pypy-commit] pypy default: add two pypyjit tests for micronumpy Message-ID: <20140312024034.453791C0128@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69880:15e695ac01d7 Date: 2014-03-11 22:39 -0400 http://bitbucket.org/pypy/pypy/changeset/15e695ac01d7/ Log: add two pypyjit tests for micronumpy diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -0,0 +1,59 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestMicroNumPy(BaseTestPyPyC): + def test_array_getitem_basic(self): + def main(): + import _numpypy.multiarray as np + arr = np.zeros((300, 300)) + x = 150 + y = 0 + while y < 300: + a = arr[x, y] + y += 1 + return a + log = self.run(main, []) + assert log.result == 0 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i76 = int_lt(i71, 300) + guard_true(i76, descr=...) + i77 = int_ge(i71, i59) + guard_false(i77, descr=...) + i78 = int_mul(i71, i61) + i79 = int_add(i55, i78) + f80 = raw_load(i67, i79, descr=) + i81 = int_add(i71, 1) + guard_not_invalidated(descr=...) + --TICK-- + jump(p0, p1, p3, p6, p7, p12, p14, p16, i81, f80, i59, p38, i55, p40, i37, i61, i67, descr=...) + """) + + def test_array_getitem_accumulate(self): + def main(): + import _numpypy.multiarray as np + arr = np.zeros((300, 300)) + a = 0.0 + x = 150 + y = 0 + while y < 300: + a += arr[x, y] + y += 1 + return a + log = self.run(main, []) + assert log.result == 0 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i81 = int_lt(i76, 300) + guard_true(i81, descr=...) + i82 = int_ge(i76, i62) + guard_false(i82, descr=...) + i83 = int_mul(i76, i64) + i84 = int_add(i58, i83) + f85 = raw_load(i70, i84, descr=) + guard_not_invalidated(descr=...) + f86 = float_add(f74, f85) + i87 = int_add(i76, 1) + --TICK-- + jump(p0, p1, p3, p6, p7, p12, p14, f86, p18, i87, i62, p41, i58, p47, i40, i64, i70, descr=...) 
+ """) From noreply at buildbot.pypy.org Wed Mar 12 03:51:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 03:51:44 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: merge default Message-ID: <20140312025144.E9FB51C027C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69881:512fc23baf35 Date: 2014-03-11 19:45 -0700 http://bitbucket.org/pypy/pypy/changeset/512fc23baf35/ Log: merge default diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -496,7 +496,7 @@ assert 'fake' in str(exc.value) exc = raises(SyntaxError, compile, b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - assert 'iso-8859-15' in str(exc.value), str(exc.value) + assert 'iso-8859-15' in str(exc.value) assert 'BOM' in str(exc.value) exc = raises(SyntaxError, compile, b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -110,7 +110,7 @@ assert charmap_decode(b'xxx\xff', 'strict', map) == ('xxx\xff', 4) exc = raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 'a'}) - assert exc.value[0] == "character mapping must return integer, None or unicode" + assert str(exc.value) == "character mapping must return integer, None or unicode" raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 0x110000}) assert (charmap_decode("\x00\x01\x02", "strict", {0: 0x10FFFF, 1: ord('b'), 2: ord('c')}) == @@ -671,11 +671,11 @@ assert 'xxx'.encode('charmap') == b'xxx' import codecs - exc = raises(TypeError, codecs.charmap_encode, '\xff', "replace", {0xff: 300}) - assert exc.value[0] == 'character mapping must be in range(256)' - exc = raises(TypeError, codecs.charmap_encode, '\xff', "replace", {0xff: u'a'}) - assert exc.value[0] == 'character mapping must return integer, None or str' - raises(UnicodeError, codecs.charmap_encode, "\xff", "replace", {0xff: None}) + exc = raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: 300}) + assert str(exc.value) == 'character mapping must be in range(256)' + exc = raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: u'a'}) + assert str(exc.value) == 'character mapping must return integer, None or str' + raises(UnicodeError, codecs.charmap_encode, u"\xff", "replace", {0xff: None}) def test_charmap_encode_replace(self): charmap = dict([(c, bytes([c, c]).upper()) for c in b"abcdefgh"]) diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -27,18 +27,18 @@ raises(KeyError, pwd.getpwuid, sys.maxsize) # -1 is allowed, cast to uid_t exc = raises(KeyError, pwd.getpwuid, -1) - m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) - assert m, exc.value[0] + m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value.args[0]) + assert m, exc.value.args[0] maxval = int(m.group(1)) assert maxval >= 2**32 - 1 # shouldn't overflow exc = raises(KeyError, pwd.getpwuid, maxval) - m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value[0]) - assert m, exc.value[0] + m = re.match('getpwuid\(\): uid not found: ([0-9]+)', exc.value.args[0]) + assert m, exc.value.args[0] # should be out of uid_t range for v in [-2, 
maxval+1, 2**128, -2**128]: exc = raises(KeyError, pwd.getpwuid, v) - assert exc.value[0] == 'getpwuid(): uid not found' + assert exc.value.args[0] == 'getpwuid(): uid not found' def test_getpwnam(self): import pwd From noreply at buildbot.pypy.org Wed Mar 12 03:51:46 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 03:51:46 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: 2to3 Message-ID: <20140312025146.5850B1C027C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69882:ab2127d2fe3a Date: 2014-03-11 19:46 -0700 http://bitbucket.org/pypy/pypy/changeset/ab2127d2fe3a/ Log: 2to3 diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -109,13 +109,13 @@ map = tuple([chr(i) for i in range(256)]) assert charmap_decode(b'xxx\xff', 'strict', map) == ('xxx\xff', 4) - exc = raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 'a'}) + exc = raises(TypeError, charmap_decode, b'\xff', "strict", {0xff: b'a'}) assert str(exc.value) == "character mapping must return integer, None or unicode" - raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 0x110000}) - assert (charmap_decode("\x00\x01\x02", "strict", + raises(TypeError, charmap_decode, b'\xff', "strict", {0xff: 0x110000}) + assert (charmap_decode(b"\x00\x01\x02", "strict", {0: 0x10FFFF, 1: ord('b'), 2: ord('c')}) == u"\U0010FFFFbc", 3) - assert (charmap_decode("\x00\x01\x02", "strict", + assert (charmap_decode(b"\x00\x01\x02", "strict", {0: u'\U0010FFFF', 1: u'b', 2: u'c'}) == u"\U0010FFFFbc", 3) From noreply at buildbot.pypy.org Wed Mar 12 03:51:47 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 03:51:47 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: adapt to py3 Message-ID: <20140312025147.A381A1C027C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69883:2879fc4ab165 Date: 2014-03-11 19:49 -0700 http://bitbucket.org/pypy/pypy/changeset/2879fc4ab165/ Log: adapt to py3 diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -367,7 +367,7 @@ t = _io.TextIOWrapper(NonbytesStream(u'a')) raises(TypeError, t.readline) t = _io.TextIOWrapper(NonbytesStream(u'a')) - t.read() == u'a' + raises(TypeError, t.read) class AppTestIncrementalNewlineDecoder: From noreply at buildbot.pypy.org Wed Mar 12 03:56:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 03:56:59 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: woops Message-ID: <20140312025659.3AB341C027C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69884:3a39be41d085 Date: 2014-03-11 19:56 -0700 http://bitbucket.org/pypy/pypy/changeset/3a39be41d085/ Log: woops diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -486,7 +486,7 @@ 'PySlice_Type': 'space.gettypeobject(W_SliceObject.typedef)', 'PyStaticMethod_Type': 'space.gettypeobject(StaticMethod.typedef)', 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', - 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' + 'PyWrapperDescr_Type': 
'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)', 'PyInstanceMethod_Type': 'space.gettypeobject(cpyext.classobject.InstanceMethod.typedef)', }.items(): GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) From noreply at buildbot.pypy.org Wed Mar 12 08:04:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 08:04:29 +0100 (CET) Subject: [pypy-commit] stmgc default: Add last_abort__bytes_in_nursery. Message-ID: <20140312070429.E67231C3152@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r975:5edbae0c780a Date: 2014-03-12 08:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/5edbae0c780a/ Log: Add last_abort__bytes_in_nursery. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -466,7 +466,7 @@ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); /* throw away the content of the nursery */ - throw_away_nursery(pseg); + long bytes_in_nursery = throw_away_nursery(pseg); /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(segment_num); @@ -476,6 +476,7 @@ stm_thread_local_t *tl = pseg->pub.running_thread; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; + tl->last_abort__bytes_in_nursery = bytes_in_nursery; /* reset these lists to NULL too on abort */ LIST_FREE(pseg->objects_pointing_to_nursery); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -215,15 +215,15 @@ _collect_now(item)); } -static void throw_away_nursery(struct stm_priv_segment_info_s *pseg) +static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ - size_t size; + size_t nursery_used; char *realnursery; realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); - size = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; - memset(realnursery, 0, size); + nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + memset(realnursery, 0, nursery_used); pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; @@ -250,6 +250,7 @@ } tree_clear(pseg->nursery_objects_shadows); + return nursery_used; } #define MINOR_NOTHING_TO_DO(pseg) \ diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -11,7 +11,7 @@ static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); -static void throw_away_nursery(struct stm_priv_segment_info_s *pseg); +static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); static void major_do_minor_collections(void); static inline bool must_abort(void) { diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -57,7 +57,10 @@ the following raw region of memory is cleared. */ char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; - /* the next fields are handled automatically by the library */ + /* after an abort, some details about the abort are stored there. 
+ (these fields are not modified on a successful commit) */ + long last_abort__bytes_in_nursery; + /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; } stm_thread_local_t; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -17,6 +17,7 @@ object_t *thread_local_obj; char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; + long last_abort__bytes_in_nursery; int associated_segment_num; ...; } stm_thread_local_t; diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -433,118 +433,16 @@ lp1 = self.pop_root() self.check_char_everywhere(lp1, 'X') - # def test_resolve_write_write_no_conflict(self): - # self.start_transaction() - # p1 = stm_allocate(16) - # p2 = stm_allocate(16) - # p1[8] = 'a' - # p2[8] = 'A' - # self.commit_transaction(False) - # self.start_transaction() - # # - # self.switch(1) - # self.start_transaction() - # stm_write(p1) - # p1[8] = 'b' - # self.commit_transaction(False) - # # - # self.switch(0) - # stm_write(p2) - # p2[8] = 'C' - # self.commit_transaction(False) - # assert p1[8] == 'b' - # assert p2[8] == 'C' - - # def test_page_extra_malloc_unchanged_page(self): - # self.start_transaction() - # p1 = stm_allocate(16) - # p2 = stm_allocate(16) - # p1[8] = 'A' - # p2[8] = 'a' - # self.commit_transaction(False) - # self.start_transaction() - # # - # self.switch(1) - # self.start_transaction() - # stm_write(p1) - # assert p1[8] == 'A' - # p1[8] = 'B' - # self.commit_transaction(False) - # # - # self.switch(0) - # stm_read(p2) - # assert p2[8] == 'a' - # p3 = stm_allocate(16) # goes into the same page, which is - # p3[8] = ':' # not otherwise modified - # self.commit_transaction(False) - # # - # assert p1[8] == 'B' - # assert p2[8] == 'a' - # assert p3[8] == ':' - - # def test_page_extra_malloc_changed_page_before(self): - # self.start_transaction() - # p1 = stm_allocate(16) - # p2 = stm_allocate(16) - # p1[8] = 'A' - # p2[8] = 'a' - # self.commit_transaction(False) - # self.start_transaction() - # # - # self.switch(1) - # self.start_transaction() - # stm_write(p1) - # assert p1[8] == 'A' - # p1[8] = 'B' - # self.commit_transaction(False) - # # - # self.switch(0) - # stm_write(p2) - # assert p2[8] == 'a' - # p2[8] = 'b' - # p3 = stm_allocate(16) # goes into the same page, which I already - # p3[8] = ':' # modified just above - # self.commit_transaction(False) - # # - # assert p1[8] == 'B' - # assert p2[8] == 'b' - # assert p3[8] == ':' - - # def test_page_extra_malloc_changed_page_after(self): - # self.start_transaction() - # p1 = stm_allocate(16) - # p2 = stm_allocate(16) - # p1[8] = 'A' - # p2[8] = 'a' - # self.commit_transaction(False) - # self.start_transaction() - # # - # self.switch(1) - # self.start_transaction() - # stm_write(p1) - # assert p1[8] == 'A' - # p1[8] = 'B' - # self.commit_transaction(False) - # # - # self.switch(0) - # p3 = stm_allocate(16) # goes into the same page, which I will - # p3[8] = ':' # modify just below - # stm_write(p2) - # assert p2[8] == 'a' - # p2[8] = 'b' - # self.commit_transaction(False) - # # - # assert p1[8] == 'B' - # assert p2[8] == 'b' - # assert p3[8] == ':' - - # def test_overflow_write_history(self): - # self.start_transaction() - # plist = [stm_allocate(n) for n in range(16, 256, 8)] - # self.commit_transaction(False) - # # - # for i in range(20): - # self.start_transaction() - # for p in plist: - # stm_write(p) - # 
self.commit_transaction(False) + def test_last_abort__bytes_in_nursery(self): + self.start_transaction() + stm_allocate(56) + self.abort_transaction() + assert self.get_stm_thread_local().last_abort__bytes_in_nursery == 56 + self.start_transaction() + assert self.get_stm_thread_local().last_abort__bytes_in_nursery == 56 + self.commit_transaction() + assert self.get_stm_thread_local().last_abort__bytes_in_nursery == 56 + self.start_transaction() + assert self.get_stm_thread_local().last_abort__bytes_in_nursery == 56 + self.abort_transaction() + assert self.get_stm_thread_local().last_abort__bytes_in_nursery == 0 From noreply at buildbot.pypy.org Wed Mar 12 08:24:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 08:24:08 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Reduce; add an XXX for the non-implemented weakrefs Message-ID: <20140312072408.D636D1C0460@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69885:c8bec5976825 Date: 2014-03-11 19:13 +0100 http://bitbucket.org/pypy/pypy/changeset/c8bec5976825/ Log: Reduce; add an XXX for the non-implemented weakrefs diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -72,6 +72,7 @@ hop.genop("stm_get_root_stack_top", [], resultvar=hop.spaceop.result) def gct_weakref_create(self, hop): + XXX op = hop.spaceop type_id = self.get_type_id(WEAKREF) @@ -104,21 +105,17 @@ resulttype=llmemory.WeakRefPtr) hop.cast_result(v_weakref) - - def _gct_with_roots_pushed(self, hop): - livevars = self.push_roots(hop) - self.default(hop) - self.pop_roots(hop, livevars) +## def _gct_with_roots_pushed(self, hop): +## livevars = self.push_roots(hop) +## self.default(hop) +## self.pop_roots(hop, livevars) - # sync with lloperation.py - gct_stm_become_inevitable = _gct_with_roots_pushed - gct_stm_set_transaction_length = _gct_with_roots_pushed - gct_stm_stop_all_other_threads = _gct_with_roots_pushed - gct_stm_partial_commit_and_resume_other_threads = _gct_with_roots_pushed - gct_stm_perform_transaction = _gct_with_roots_pushed - gct_stm_allocate_nonmovable_int_adr = _gct_with_roots_pushed - gct_stm_inspect_abort_info = _gct_with_roots_pushed - gct_stm_threadlocalref_set = _gct_with_roots_pushed +## # sync with lloperation.py +## gct_stm_become_inevitable = _gct_with_roots_pushed +## gct_stm_partial_commit_and_resume_other_threads = _gct_with_roots_pushed +## gct_stm_perform_transaction = _gct_with_roots_pushed +## gct_stm_inspect_abort_info = _gct_with_roots_pushed +## gct_stm_threadlocalref_set = _gct_with_roots_pushed class StmRootWalker(BaseRootWalker): diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py --- a/rpython/translator/stm/jitdriver.py +++ b/rpython/translator/stm/jitdriver.py @@ -148,6 +148,7 @@ def make_invoke_stm_function(self): CONTAINER = self.CONTAINER callback = self.callback_function + XXX perform_transaction = rstm.make_perform_transaction(callback, self.CONTAINERP) irange = range(len(self.TYPES)) From noreply at buildbot.pypy.org Wed Mar 12 08:24:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 08:24:10 +0100 (CET) Subject: [pypy-commit] pypy default: Fix shown by test_streamio Message-ID: <20140312072410.226D51C0460@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69886:2ab068f12ff3 Date: 2014-03-12 08:23 +0100 http://bitbucket.org/pypy/pypy/changeset/2ab068f12ff3/ 
Log: Fix shown by test_streamio diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -290,14 +290,15 @@ if not s_tup.is_constant(): raise TyperError("contains() on non-const tuple") t = s_tup.const - if len(t) == 0: - hop.exception_cannot_occur() - return hop.inputconst(Bool, False) + s_item = hop.args_s[1] r_item = hop.args_r[1] v_arg = hop.inputarg(r_item, arg=1) ll_eq = r_item.get_ll_eq_function() or _ll_equal v_result = None for x in t: + s_const_item = hop.rtyper.annotator.bookkeeper.immutablevalue(x) + if not s_item.contains(s_const_item): + continue # corner case, see test_constant_tuple_contains_bug c_tuple_item = hop.inputconst(r_item, x) v_equal = hop.gendirectcall(ll_eq, v_arg, c_tuple_item) if v_result is None: diff --git a/rpython/rtyper/test/test_rtuple.py b/rpython/rtyper/test/test_rtuple.py --- a/rpython/rtyper/test/test_rtuple.py +++ b/rpython/rtyper/test/test_rtuple.py @@ -95,6 +95,14 @@ res = self.interpret(f, [50]) assert res is False + def test_constant_tuple_contains_bug(self): + def f(i): + return chr(i) in ('1', '2', '34') # the '34' can never match + res = self.interpret(f, [ord('1')]) + assert res is True + res = self.interpret(f, [ord('3')]) + assert res is False + def test_conv(self): def t0(): return (3, 2, None) From noreply at buildbot.pypy.org Wed Mar 12 08:51:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 08:51:07 +0100 (CET) Subject: [pypy-commit] stmgc default: stm_is_inevitable(), and fix tests for inevitable transactions Message-ID: <20140312075107.D7CA51C301F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r976:e596369e8a2d Date: 2014-03-12 08:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/e596369e8a2d/ Log: stm_is_inevitable(), and fix tests for inevitable transactions diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -248,6 +248,9 @@ if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } +static inline int stm_is_inevitable(void) { + return (STM_SEGMENT->jmpbuf_ptr == NULL); +} /* Forces a safe-point if needed. Normally not needed: this is automatic if you call stm_allocate(). 
*/ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -46,7 +46,8 @@ void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); bool _check_commit_transaction(void); bool _check_abort_transaction(void); -bool _check_become_inevitable(); +bool _check_become_inevitable(void); +int stm_is_inevitable(void); void _set_type_id(object_t *obj, uint32_t h); uint32_t _get_type_id(object_t *obj); @@ -110,76 +111,47 @@ return obj->stm_flags; } +#define CHECKED(CALL) \ + stm_jmpbuf_t here; \ + stm_segment_info_t *segment = STM_SEGMENT; \ + if (__builtin_setjmp(here) == 0) { /* returned directly */ \ + if (segment->jmpbuf_ptr != NULL) { \ + assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); \ + segment->jmpbuf_ptr = &here; \ + } \ + CALL; \ + if (segment->jmpbuf_ptr != NULL) { \ + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; \ + } \ + return 0; \ + } \ + if (segment->jmpbuf_ptr != NULL) { \ + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; \ + } \ + return 1 + bool _checked_stm_write(object_t *object) { - stm_jmpbuf_t here; - stm_segment_info_t *segment = STM_SEGMENT; - if (__builtin_setjmp(here) == 0) { // returned directly - assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); - segment->jmpbuf_ptr = &here; - stm_write(object); - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 0; - } - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 1; + CHECKED(stm_write(object)); } bool _check_stop_safe_point(void) { - stm_jmpbuf_t here; - stm_segment_info_t *segment = STM_SEGMENT; - if (__builtin_setjmp(here) == 0) { // returned directly - assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); - segment->jmpbuf_ptr = &here; - _stm_stop_safe_point(); - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 0; - } - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 1; + CHECKED(_stm_stop_safe_point()); } bool _check_commit_transaction(void) { - stm_jmpbuf_t here; - stm_segment_info_t *segment = STM_SEGMENT; - if (__builtin_setjmp(here) == 0) { // returned directly - assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); - segment->jmpbuf_ptr = &here; - stm_commit_transaction(); - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 0; - } - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 1; + CHECKED(stm_commit_transaction()); } bool _check_abort_transaction(void) { - stm_jmpbuf_t here; - stm_segment_info_t *segment = STM_SEGMENT; - if (__builtin_setjmp(here) == 0) { // returned directly - assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); - segment->jmpbuf_ptr = &here; - stm_abort_transaction(); - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 0; // but should be unreachable in this case - } - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 1; + CHECKED(stm_abort_transaction()); } bool _check_become_inevitable() { - stm_jmpbuf_t here; - stm_segment_info_t *segment = STM_SEGMENT; - if (__builtin_setjmp(here) == 0) { // returned directly - assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); - segment->jmpbuf_ptr = &here; - stm_become_inevitable("TEST"); - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 0; // but should be unreachable in this case - } - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; - return 1; + CHECKED(stm_become_inevitable("TEST")); } +#undef CHECKED + void _set_type_id(object_t *obj, uint32_t h) { diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -382,7 +382,9 @@ self.start_transaction() stm_write(lp1) stm_set_char(lp1, 'b') + assert lib.stm_is_inevitable() == 0 
stm_become_inevitable() + assert lib.stm_is_inevitable() == 1 self.commit_transaction() # py.test.raises(Conflict, self.switch, 0) From noreply at buildbot.pypy.org Wed Mar 12 09:26:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 09:26:54 +0100 (CET) Subject: [pypy-commit] stmgc default: Tweak: expose this logic for pypy Message-ID: <20140312082654.C8D7F1C3143@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r977:83b7d2d60a20 Date: 2014-03-12 09:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/83b7d2d60a20/ Log: Tweak: expose this logic for pypy diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ b/c7/stm/fprintcolor.c @@ -3,11 +3,6 @@ /* ------------------------------------------------------------ */ -static int dprintfcolor(void) -{ - return 31 + STM_SEGMENT->segment_num % 6; -} - static int threadcolor_printf(const char *format, ...) { char buffer[2048]; diff --git a/c7/stm/fprintcolor.h b/c7/stm/fprintcolor.h --- a/c7/stm/fprintcolor.h +++ b/c7/stm/fprintcolor.h @@ -7,7 +7,10 @@ #define dprintf(args) threadcolor_printf args -static int dprintfcolor(void); +static inline int dprintfcolor(void) +{ + return 31 + STM_SEGMENT->segment_num % 6; +} static int threadcolor_printf(const char *format, ...) __attribute__((format (printf, 1, 2))); From noreply at buildbot.pypy.org Wed Mar 12 09:30:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 09:30:56 +0100 (CET) Subject: [pypy-commit] stmgc default: Add an assert to prevent double-starting transactions Message-ID: <20140312083056.9F7321D28D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r978:3f0d8773b90b Date: 2014-03-12 09:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/3f0d8773b90b/ Log: Add an assert to prevent double-starting transactions diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -137,6 +137,8 @@ void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { + assert(!_stm_in_transaction(tl)); + s_mutex_lock(); retry: From noreply at buildbot.pypy.org Wed Mar 12 09:34:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 09:34:29 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Reintroduce stm_perform_transaction() and atomic transactions. Message-ID: <20140312083429.DFA501D28D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69887:e0337a4058f2 Date: 2014-03-12 09:33 +0100 http://bitbucket.org/pypy/pypy/changeset/e0337a4058f2/ Log: Reintroduce stm_perform_transaction() and atomic transactions. 
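[Editorial sketch, not part of the patch below.] The atomic-mode bookkeeping this commit introduces in stmgcintf.h is a single thread-local counter, pypy_stm_ready_atomic: 0 means the thread is not initialized, 1 means normal transactional mode, 2 or more means atomic mode. A minimal Python model of that convention, mirroring the C helpers in the diff:

    # Illustrative model only: the counter convention behind
    # pypy_stm_ready_atomic (see pypy_stm_increment_atomic() and
    # friends in the stmgcintf.h hunk below).
    class AtomicCounterSketch(object):
        def __init__(self):
            self.ready_atomic = 1          # value after pypy_stm_setup()

        def increment_atomic(self):
            self.ready_atomic += 1         # stm_increment_atomic

        def decrement_atomic(self):
            self.ready_atomic -= 1         # stm_decrement_atomic
            if self.ready_atomic == 0:
                self.ready_atomic = 1      # never drops below "normal mode"

        def get_atomic(self):
            return self.ready_atomic - 1   # 0 outside atomic blocks

    sketch = AtomicCounterSketch()
    sketch.increment_atomic()
    assert sketch.get_atomic() == 1        # inside one atomic block
    sketch.decrement_atomic()
    assert sketch.get_atomic() == 0        # back to normal mode
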
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -66,11 +66,11 @@ @dont_look_inside def increment_atomic(): - llop.stm_change_atomic(lltype.Signed, +1) + llop.stm_increment_atomic(lltype.Void) @dont_look_inside def decrement_atomic(): - llop.stm_change_atomic(lltype.Signed, -1) + llop.stm_decrement_atomic(lltype.Void) @dont_look_inside def is_atomic(): @@ -96,7 +96,7 @@ def before_external_call(): if we_are_translated(): # this tries to commit, or becomes inevitable if atomic - llop.stm_commit_transaction(lltype.Void) + llop.stm_commit_if_not_atomic(lltype.Void) before_external_call._dont_reach_me_in_del_ = True before_external_call._transaction_break_ = True @@ -104,7 +104,7 @@ def after_external_call(): if we_are_translated(): # starts a new transaction if we are not atomic already - llop.stm_start_inevitable_transaction(lltype.Void) + llop.stm_start_inevitable_if_not_atomic(lltype.Void) after_external_call._dont_reach_me_in_del_ = True after_external_call._transaction_break_ = True diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -438,6 +438,7 @@ # see threadlocalref.py 'stm_threadlocal_get': LLOp(sideeffects=False), 'stm_threadlocal_set': LLOp(), + 'stm_perform_transaction':LLOp(canmallocgc=True), ## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_become_inevitable': LLOp(canmallocgc=True), diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -786,7 +786,8 @@ if node.forward_decl: print >> f, node.forward_decl elif node.name is not None: - print >> f, '%s %s;' % (node.typetag, node.name) + if node.typetag != '': + print >> f, '%s %s;' % (node.typetag, node.name) print >> f for node in structdeflist: for line in node.definition(): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -134,13 +134,11 @@ return '/* %s = */ STM_POP_ROOT_RET(stm_thread_local);' % (arg0,) return 'STM_POP_ROOT(stm_thread_local, %s);' % (arg0,) -def stm_commit_transaction(funcgen, op): - return '{ int e = errno; stm_commit_transaction(); errno = e; }' +def stm_commit_if_not_atomic(funcgen, op): + return 'pypy_stm_commit_if_not_atomic();' -def stm_start_inevitable_transaction(funcgen, op): - return ('{ int e = errno; ' - 'stm_start_inevitable_transaction(&stm_thread_local); ' - 'errno = e; }') +def stm_start_inevitable_if_not_atomic(funcgen, op): + return 'pypy_stm_start_inevitable_if_not_atomic();' def stm_enter_callback_call(funcgen, op): result = funcgen.expr(op.result) @@ -167,6 +165,12 @@ arg0 = funcgen.expr(op.args[0]) return 'stm_thread_local.thread_local_obj = (object_t *)%s;' % (arg0,) +def stm_perform_transaction(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + arg1 = funcgen.expr(op.args[1]) + return ('pypy_stm_perform_transaction((object_t *)%s, ' + '(int(*)(object_t *, int))%s);' % (arg0, arg1)) + ##def stm_initialize(funcgen, op): ## return '''stm_initialize(); @@ -297,16 +301,6 @@ ## result = funcgen.expr(op.result) ## return '%s = ((struct rpyobj_s*)%s)->tid;' % (result, arg0) -##def stm_hash(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## result = funcgen.expr(op.result) -## return '%s = stm_hash((gcptr)%s);' % (result, arg0) - -##def 
stm_id(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## result = funcgen.expr(op.result) -## return '%s = stm_id((gcptr)%s);' % (result, arg0) - ##def stm_change_atomic(funcgen, op): ## arg0 = funcgen.expr(op.args[0]) ## return 'stm_atomic(%s);' % (arg0,) @@ -315,20 +309,6 @@ ## result = funcgen.expr(op.result) ## return '%s = stm_atomic(0);' % (result,) -##def stm_threadlocal_get(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = (%s)stm_thread_local_obj;' % ( -## result, cdecl(funcgen.lltypename(op.result), '')) - -##def stm_threadlocal_set(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## return 'stm_thread_local_obj = (gcptr)%s;' % (arg0,) - -##def stm_perform_transaction(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## arg1 = funcgen.expr(op.args[1]) -## return 'stm_perform_transaction((gcptr)%s, %s);' % (arg0, arg1) - ##def stm_enter_callback_call(funcgen, op): ## result = funcgen.expr(op.result) ## return '%s = stm_enter_callback_call();' % (result,) diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -3ce4c20d80e7 +3f0d8773b90b diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -138,6 +138,8 @@ void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { + assert(!_stm_in_transaction(tl)); + s_mutex_lock(); retry: @@ -467,7 +469,7 @@ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); /* throw away the content of the nursery */ - throw_away_nursery(pseg); + long bytes_in_nursery = throw_away_nursery(pseg); /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(segment_num); @@ -477,6 +479,7 @@ stm_thread_local_t *tl = pseg->pub.running_thread; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; + tl->last_abort__bytes_in_nursery = bytes_in_nursery; /* reset these lists to NULL too on abort */ LIST_FREE(pseg->objects_pointing_to_nursery); diff --git a/rpython/translator/stm/src_stm/stm/fprintcolor.c b/rpython/translator/stm/src_stm/stm/fprintcolor.c --- a/rpython/translator/stm/src_stm/stm/fprintcolor.c +++ b/rpython/translator/stm/src_stm/stm/fprintcolor.c @@ -4,11 +4,6 @@ /* ------------------------------------------------------------ */ -static int dprintfcolor(void) -{ - return 31 + STM_SEGMENT->segment_num % 6; -} - static int threadcolor_printf(const char *format, ...) { char buffer[2048]; diff --git a/rpython/translator/stm/src_stm/stm/fprintcolor.h b/rpython/translator/stm/src_stm/stm/fprintcolor.h --- a/rpython/translator/stm/src_stm/stm/fprintcolor.h +++ b/rpython/translator/stm/src_stm/stm/fprintcolor.h @@ -8,7 +8,10 @@ #define dprintf(args) threadcolor_printf args -static int dprintfcolor(void); +static inline int dprintfcolor(void) +{ + return 31 + STM_SEGMENT->segment_num % 6; +} static int threadcolor_printf(const char *format, ...) 
__attribute__((format (printf, 1, 2))); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -216,15 +216,15 @@ _collect_now(item)); } -static void throw_away_nursery(struct stm_priv_segment_info_s *pseg) +static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ - size_t size; + size_t nursery_used; char *realnursery; realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); - size = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; - memset(realnursery, 0, size); + nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + memset(realnursery, 0, nursery_used); pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; @@ -251,6 +251,7 @@ } tree_clear(pseg->nursery_objects_shadows); + return nursery_used; } #define MINOR_NOTHING_TO_DO(pseg) \ diff --git a/rpython/translator/stm/src_stm/stm/nursery.h b/rpython/translator/stm/src_stm/stm/nursery.h --- a/rpython/translator/stm/src_stm/stm/nursery.h +++ b/rpython/translator/stm/src_stm/stm/nursery.h @@ -12,7 +12,7 @@ static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); -static void throw_away_nursery(struct stm_priv_segment_info_s *pseg); +static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); static void major_do_minor_collections(void); static inline bool must_abort(void) { diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -58,7 +58,10 @@ the following raw region of memory is cleared. */ char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; - /* the next fields are handled automatically by the library */ + /* after an abort, some details about the abort are stored there. + (these fields are not modified on a successful commit) */ + long last_abort__bytes_in_nursery; + /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; } stm_thread_local_t; @@ -246,6 +249,9 @@ if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } +static inline int stm_is_inevitable(void) { + return (STM_SEGMENT->jmpbuf_ptr == NULL); +} /* Forces a safe-point if needed. Normally not needed: this is automatic if you call stm_allocate(). 
*/ diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -4,6 +4,10 @@ __thread struct stm_thread_local_s stm_thread_local; +/* 0 = not initialized; 1 = normal mode; 2 or more = atomic mode */ +__thread long pypy_stm_ready_atomic; +__thread uintptr_t pypy_stm_nursery_low_fill_mark; + extern Signed pypy_stmcb_size_rounded_up(void*); extern void pypy_stmcb_trace(void*, void(*)(void*)); @@ -28,7 +32,7 @@ #define LOW_FILL_MARK 400000 -stm_char *pypy_stm_nursery_low_fill_mark; +static long pypy_transaction_length; void pypy_stm_set_transaction_length(long percentage) @@ -38,36 +42,124 @@ long low_fill_mark = LOW_FILL_MARK * percentage / 100; if (low_fill_mark > NURSERY_SIZE / 2) low_fill_mark = NURSERY_SIZE / 2; - pypy_stm_nursery_low_fill_mark = ((stm_char *)_stm_nursery_start) + - low_fill_mark; + pypy_transaction_length = low_fill_mark; } void pypy_stm_setup(void) { stm_setup(); stm_register_thread_local(&stm_thread_local); + pypy_stm_ready_atomic = 1; pypy_stm_set_transaction_length(100); - stm_start_inevitable_transaction(&stm_thread_local); + pypy_stm_start_inevitable_if_not_atomic(); } long pypy_stm_enter_callback_call(void) { - long token = 0; - - if (stm_thread_local.shadowstack == NULL) { + if (pypy_stm_ready_atomic == 0) { /* first time we see this thread */ - token = 1; + int e = errno; stm_register_thread_local(&stm_thread_local); + errno = e; + pypy_stm_ready_atomic = 1; + pypy_stm_start_inevitable_if_not_atomic(); + return 1; } - stm_start_inevitable_transaction(&stm_thread_local); - return token; + else { + /* callback from C code, itself called from Python code */ + pypy_stm_start_inevitable_if_not_atomic(); + return 0; + } } void pypy_stm_leave_callback_call(long token) { - stm_commit_transaction(); if (token == 1) { + /* if we're returning into foreign C code that was not itself + called from Python code, then we're ignoring the atomic + status and committing anyway. */ + int e = errno; + pypy_stm_ready_atomic = 1; + stm_commit_transaction(); + pypy_stm_ready_atomic = 0; stm_unregister_thread_local(&stm_thread_local); - assert(stm_thread_local.shadowstack == NULL); + errno = e; + } + else { + pypy_stm_commit_if_not_atomic(); } } + +void pypy_stm_perform_transaction(object_t *arg, int callback(object_t *, int)) +{ /* must save roots around this call */ + stm_jmpbuf_t jmpbuf; + long volatile v_counter = 0; +#ifndef NDEBUG + object_t **volatile old_shadowstack = stm_thread_local.shadowstack; +#endif + + STM_PUSH_ROOT(stm_thread_local, arg); + /*STM_PUSH_ROOT(END_MARKER_OFF); XXX redo this optimization */ + + while (1) { + + if (pypy_stm_ready_atomic == 1) { + stm_commit_transaction(); + STM_START_TRANSACTION(&stm_thread_local, jmpbuf); + } + + /* After setjmp(), the local variables v_* are preserved because they + * are volatile. The other variables are only declared here. */ + long counter, result; + counter = v_counter; + v_counter = counter + 1; + + /* If counter==0, initialize 'pypy_stm_nursery_low_fill_mark' + from the configured length limit. If counter>0, we did an + abort, and we can now configure 'pypy_stm_nursery_low_fill_mark' + to a value slightly smaller than the value at last abort. 
+ */ + if (stm_is_inevitable()) { + pypy_stm_nursery_low_fill_mark = 0; + } + else { + long limit; + if (counter == 0) { + limit = pypy_transaction_length; + } + else { + limit = stm_thread_local.last_abort__bytes_in_nursery; + limit -= (limit >> 4); + } + pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit; + } + + /* invoke the callback in the new transaction */ + STM_POP_ROOT(stm_thread_local, arg); + assert(old_shadowstack == stm_thread_local.shadowstack); + STM_PUSH_ROOT(stm_thread_local, arg); + result = callback(arg, counter); + if (result <= 0) + break; + v_counter = 0; + } + + if (STM_SEGMENT->jmpbuf_ptr == &jmpbuf) { + /* we can't leave this function leaving a non-inevitable + transaction whose jmpbuf points into this function + */ + if (pypy_stm_ready_atomic == 1) { + stm_commit_transaction(); + stm_start_inevitable_transaction(&stm_thread_local); + pypy_stm_nursery_low_fill_mark = 0; + } + else { + _stm_become_inevitable("perform_transaction left with atomic"); + } + } + + //gcptr x = stm_pop_root(); /* pop the END_MARKER */ + //assert(x == END_MARKER_OFF || x == END_MARKER_ON); + STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */ + assert(old_shadowstack == stm_thread_local.shadowstack); +} diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -4,24 +4,55 @@ /* meant to be #included after src_stm/stmgc.h */ +#include #include "stmgc.h" #include "stm/atomic.h" /* for spin_loop() and write_fence() */ extern __thread struct stm_thread_local_s stm_thread_local; -extern stm_char *pypy_stm_nursery_low_fill_mark; +extern __thread long pypy_stm_ready_atomic; +extern __thread uintptr_t pypy_stm_nursery_low_fill_mark; void pypy_stm_setup(void); void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ + +static inline void pypy_stm_commit_if_not_atomic(void) { + if (pypy_stm_ready_atomic == 1) { + int e = errno; + stm_commit_transaction(); + errno = e; + } +} +static inline void pypy_stm_start_inevitable_if_not_atomic(void) { + if (pypy_stm_ready_atomic == 1) { + int e = errno; + stm_start_inevitable_transaction(&stm_thread_local); + pypy_stm_nursery_low_fill_mark = 0; + errno = e; + } +} +static inline void pypy_stm_increment_atomic(void) { + pypy_stm_ready_atomic++; +} +static inline void pypy_stm_decrement_atomic(void) { + if (--pypy_stm_ready_atomic == 0) + pypy_stm_ready_atomic = 1; +} +static inline long pypy_stm_get_atomic(void) { + return pypy_stm_ready_atomic - 1; +} long pypy_stm_enter_callback_call(void); void pypy_stm_leave_callback_call(long); void pypy_stm_set_transaction_length(long); +void pypy_stm_perform_transaction(object_t *, int(object_t *, int)); static inline int pypy_stm_should_break_transaction(void) { /* we should break the current transaction if we have used more than - some initial portion of the nursery, or if we are running inevitable */ - return (STM_SEGMENT->nursery_current >= pypy_stm_nursery_low_fill_mark || - STM_SEGMENT->jmpbuf_ptr == NULL); + some initial portion of the nursery, or if we are running inevitable + (in which case pypy_stm_nursery_low_fill_mark is set to 0) + */ + uintptr_t current = (uintptr_t)STM_SEGMENT->nursery_current; + return current >= pypy_stm_nursery_low_fill_mark; } From noreply at buildbot.pypy.org Wed Mar 12 09:37:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 09:37:17 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixes 
for test_start_thread Message-ID: <20140312083717.326091D28D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69888:69a3715048f7 Date: 2014-03-12 09:36 +0100 http://bitbucket.org/pypy/pypy/changeset/69a3715048f7/ Log: Fixes for test_start_thread diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -427,18 +427,19 @@ 'stm_become_inevitable': LLOp(canmallocgc=True), 'stm_push_root': LLOp(), 'stm_pop_root_into': LLOp(), - 'stm_commit_transaction': LLOp(canmallocgc=True), - 'stm_start_inevitable_transaction': LLOp(canmallocgc=True), - 'stm_enter_callback_call':LLOp(canmallocgc=True), - 'stm_leave_callback_call':LLOp(), - 'stm_should_break_transaction': LLOp(sideeffects=False), - 'stm_set_transaction_length': LLOp(), + 'stm_commit_if_not_atomic': LLOp(canmallocgc=True), + 'stm_start_inevitable_if_not_atomic': LLOp(canmallocgc=True), + 'stm_enter_callback_call': LLOp(canmallocgc=True), + 'stm_leave_callback_call': LLOp(), + 'stm_perform_transaction': LLOp(canmallocgc=True), + 'stm_should_break_transaction': LLOp(sideeffects=False), + 'stm_set_transaction_length': LLOp(), + 'stm_threadlocalref_get': LLOp(sideeffects=False), 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, # see threadlocalref.py 'stm_threadlocal_get': LLOp(sideeffects=False), 'stm_threadlocal_set': LLOp(), - 'stm_perform_transaction':LLOp(canmallocgc=True), ## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_become_inevitable': LLOp(canmallocgc=True), diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -71,8 +71,8 @@ glob.seen = None rthread.start_new_thread(threadfn, ()) while glob.seen is None: - llop.stm_commit_transaction(lltype.Void) - llop.stm_start_inevitable_transaction(lltype.Void) + llop.stm_commit_if_not_atomic(lltype.Void) + llop.stm_start_inevitable_if_not_atomic(lltype.Void) return glob.seen.value # t, cbuilder = self.compile(entry_point) From noreply at buildbot.pypy.org Wed Mar 12 09:39:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 09:39:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixes for test_stm_atomic Message-ID: <20140312083952.6AD521C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69889:fafe0f9d754c Date: 2014-03-12 09:39 +0100 http://bitbucket.org/pypy/pypy/changeset/fafe0f9d754c/ Log: Fixes for test_stm_atomic diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -441,6 +441,10 @@ 'stm_threadlocal_get': LLOp(sideeffects=False), 'stm_threadlocal_set': LLOp(), + 'stm_increment_atomic': LLOp(), + 'stm_decrement_atomic': LLOp(), + 'stm_get_atomic': LLOp(sideeffects=False), + ## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_become_inevitable': LLOp(canmallocgc=True), ## 'stm_stop_all_other_threads': LLOp(canmallocgc=True), @@ -449,9 +453,6 @@ ## 'stm_major_collect': LLOp(canmallocgc=True), ## 'stm_get_tid': LLOp(canfold=True), ## 'stm_ptr_eq': LLOp(canfold=True), -## 'stm_change_atomic': LLOp(), -## 'stm_get_atomic': LLOp(sideeffects=False), -## 
'stm_perform_transaction':LLOp(canmallocgc=True), ## 'stm_abort_and_retry': LLOp(canmallocgc=True), ## 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -171,6 +171,16 @@ return ('pypy_stm_perform_transaction((object_t *)%s, ' '(int(*)(object_t *, int))%s);' % (arg0, arg1)) +def stm_increment_atomic(funcgen, op): + return 'pypy_stm_increment_atomic();' + +def stm_decrement_atomic(funcgen, op): + return 'pypy_stm_decrement_atomic();' + +def stm_get_atomic(funcgen, op): + result = funcgen.expr(op.result) + return '%s = pypy_stm_get_atomic();' % (result,) + ##def stm_initialize(funcgen, op): ## return '''stm_initialize(); @@ -301,14 +311,6 @@ ## result = funcgen.expr(op.result) ## return '%s = ((struct rpyobj_s*)%s)->tid;' % (result, arg0) -##def stm_change_atomic(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## return 'stm_atomic(%s);' % (arg0,) - -##def stm_get_atomic(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = stm_atomic(0);' % (result,) - ##def stm_enter_callback_call(funcgen, op): ## result = funcgen.expr(op.result) ## return '%s = stm_enter_callback_call();' % (result,) From noreply at buildbot.pypy.org Wed Mar 12 09:45:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 09:45:47 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Enough to make the rstm.abort_info* functions not crash, but so far they don't record anything. Message-ID: <20140312084547.BD8A41C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69890:a701941eff30 Date: 2014-03-12 09:45 +0100 http://bitbucket.org/pypy/pypy/changeset/a701941eff30/ Log: Enough to make the rstm.abort_info* functions not crash, but so far they don't record anything. 
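[Editorial sketch, not part of the patch below.] Because the C-level stubs in this commit return NULL rather than recorded abort information, RPython callers have to guard the returned char pointer, exactly as the updated test in the diff does. A hedged sketch of that calling pattern; report_last_abort_info() is a hypothetical helper introduced only for illustration:

    # Caller-side pattern assumed by this commit; the helper name is made up.
    from rpython.rlib import rstm
    from rpython.rtyper.lltypesystem import rffi

    def report_last_abort_info():
        last = rstm.charp_inspect_abort_info()   # NULL for now with this commit
        if last:
            return rffi.charp2str(last)          # real info, once recording works
        return None                              # nothing was recorded yet
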
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -429,6 +429,7 @@ 'stm_pop_root_into': LLOp(), 'stm_commit_if_not_atomic': LLOp(canmallocgc=True), 'stm_start_inevitable_if_not_atomic': LLOp(canmallocgc=True), + 'stm_abort_and_retry': LLOp(canmallocgc=True), 'stm_enter_callback_call': LLOp(canmallocgc=True), 'stm_leave_callback_call': LLOp(), 'stm_perform_transaction': LLOp(canmallocgc=True), @@ -445,6 +446,10 @@ 'stm_decrement_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), + 'stm_abort_info_push': LLOp(), + 'stm_abort_info_pop': LLOp(), + 'stm_inspect_abort_info': LLOp(sideeffects=False, canmallocgc=True), + ## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_become_inevitable': LLOp(canmallocgc=True), ## 'stm_stop_all_other_threads': LLOp(canmallocgc=True), @@ -453,19 +458,8 @@ ## 'stm_major_collect': LLOp(canmallocgc=True), ## 'stm_get_tid': LLOp(canfold=True), ## 'stm_ptr_eq': LLOp(canfold=True), -## 'stm_abort_and_retry': LLOp(canmallocgc=True), ## 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), - -## 'stm_threadlocalref_get': LLOp(sideeffects=False), -## 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, -## # see threadlocalref.py -## 'stm_threadlocal_get': LLOp(sideeffects=False), -## 'stm_threadlocal_set': LLOp(), - -## 'stm_abort_info_push': LLOp(), -## 'stm_abort_info_pop': LLOp(), -## 'stm_inspect_abort_info': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_get_adr_of_private_rev_num':LLOp(), ## 'stm_get_adr_of_read_barrier_cache':LLOp(), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -181,6 +181,22 @@ result = funcgen.expr(op.result) return '%s = pypy_stm_get_atomic();' % (result,) +def stm_abort_and_retry(funcgen, op): + return 'stm_abort_transaction();' + +def stm_abort_info_push(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + arg1 = funcgen.expr(op.args[1]) + return '//XXX stm_abort_info_push((gcptr)%s, %s);' % (arg0, arg1) + +def stm_abort_info_pop(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + return '//XXX stm_abort_info_pop(%s);' % (arg0,) + +def stm_inspect_abort_info(funcgen, op): + result = funcgen.expr(op.result) + return '%s = NULL; //XXX stm_inspect_abort_info();' % (result,) + ##def stm_initialize(funcgen, op): ## return '''stm_initialize(); @@ -319,22 +335,6 @@ ## arg0 = funcgen.expr(op.args[0]) ## return 'stm_leave_callback_call(%s);' % (arg0,) -##def stm_abort_and_retry(funcgen, op): -## return 'stm_abort_and_retry();' - -##def stm_abort_info_push(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## arg1 = funcgen.expr(op.args[1]) -## return 'stm_abort_info_push((gcptr)%s, %s);' % (arg0, arg1) - -##def stm_abort_info_pop(funcgen, op): -## arg0 = funcgen.expr(op.args[0]) -## return 'stm_abort_info_pop(%s);' % (arg0,) - -##def stm_inspect_abort_info(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = stm_inspect_abort_info();' % (result,) - ##def stm_minor_collect(funcgen, op): ## return 'stm_minor_collect();' diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -255,7 +255,10 @@ rstm.abort_and_retry() # last = 
rstm.charp_inspect_abort_info() - print rffi.charp2str(last) + if last: + print rffi.charp2str(last) + else: + print 'got abort_info=NULL!' print int(bool(rstm.charp_inspect_abort_info())) # rstm.abort_info_pop(2) From noreply at buildbot.pypy.org Wed Mar 12 09:48:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 09:48:23 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix (stm/test/test_ztranslated, test_dtoa) Message-ID: <20140312084823.C79131C0460@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69891:08d0f40dfe46 Date: 2014-03-12 09:47 +0100 http://bitbucket.org/pypy/pypy/changeset/08d0f40dfe46/ Log: Fix (stm/test/test_ztranslated, test_dtoa) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -130,6 +130,14 @@ def copy_raw_to_string(ptrsrc, dst, dststart, length): # xxx Warning: same note as above apply: don't do this at home assert length >= 0 + + if rgc.stm_is_enabled(): + i = 0 + while i < length: + dst.chars[dststart + i] = ptrsrc[i] + i += 1 + return + # from here, no GC operations can happen dst = _get_raw_buf(SRC_TP, dst, dststart) adr = llmemory.cast_ptr_to_adr(ptrsrc) From noreply at buildbot.pypy.org Wed Mar 12 10:46:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 10:46:59 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Reimplement stm_ignored Message-ID: <20140312094659.0D8771C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69892:0fc7602e53d3 Date: 2014-03-12 10:46 +0100 http://bitbucket.org/pypy/pypy/changeset/0fc7602e53d3/ Log: Reimplement stm_ignored diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -5,6 +5,7 @@ BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) from rpython.memory.gctypelayout import WEAKREF, WEAKREFPTR from rpython.rtyper import rmodel, llannotation +from rpython.translator.backendopt.support import var_needsgc class StmFrameworkGCTransformer(BaseFrameworkGCTransformer): @@ -52,6 +53,25 @@ for var in reversed(livevars): hop.genop("stm_pop_root_into", [var]) + def transform_block(self, *args, **kwds): + self.in_stm_ignored = False + BaseFrameworkGCTransformer.transform_block(self, *args, **kwds) + assert not self.in_stm_ignored, ( + "unbalanced stm_ignore_start/stm_ignore_stop in block") + + def gct_stm_ignored_start(self, hop): + assert not self.in_stm_ignored + self.in_stm_ignored = True + self.default(hop) + + def gct_stm_ignored_stop(self, hop): + assert self.in_stm_ignored + self.in_stm_ignored = False + self.default(hop) + + def var_needs_set_transform(self, var): + return True + def transform_generic_set(self, hop): # XXX detect if we're inside a 'stm_ignored' block and... do what? 
assert self.write_barrier_ptr == "stm" @@ -61,8 +81,13 @@ 'raw_store') if (v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): - self.write_barrier_calls += 1 - hop.genop("stm_write", [v_struct]) + if self.in_stm_ignored: + if var_needsgc(hop.spaceop.args[-1]): + raise Exception("in stm_ignored block: write of a gc " + "pointer") + else: + self.write_barrier_calls += 1 + hop.genop("stm_write", [v_struct]) hop.rename('bare_' + opname) def gc_header_for(self, obj, needs_hash=False): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -450,6 +450,9 @@ 'stm_abort_info_pop': LLOp(), 'stm_inspect_abort_info': LLOp(sideeffects=False, canmallocgc=True), + 'stm_ignored_start': LLOp(canrun=True), + 'stm_ignored_stop': LLOp(canrun=True), + ## 'stm_allocate_nonmovable_int_adr': LLOp(sideeffects=False, canmallocgc=True), ## 'stm_become_inevitable': LLOp(canmallocgc=True), ## 'stm_stop_all_other_threads': LLOp(canmallocgc=True), @@ -467,9 +470,6 @@ ## 'stm_get_adr_of_nursery_nextlimit': LLOp(), ## 'stm_get_adr_of_active': LLOp(), -## 'stm_ignored_start': LLOp(canrun=True), -## 'stm_ignored_stop': LLOp(canrun=True), - # __________ address operations __________ 'boehm_malloc': LLOp(), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -197,6 +197,12 @@ result = funcgen.expr(op.result) return '%s = NULL; //XXX stm_inspect_abort_info();' % (result,) +def stm_ignored_start(funcgen, op): + return '/* stm_ignored_start */' + +def stm_ignored_stop(funcgen, op): + return '/* stm_ignored_stop */' + ##def stm_initialize(funcgen, op): ## return '''stm_initialize(); diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -23,10 +23,20 @@ if not block.operations: continue newops = [] + stm_ignored = False for op in block.operations: if op.opname in READ_OPS and is_gc_ptr(op.args[0].concretetype): - v_none = varoftype(lltype.Void) - newops.append(SpaceOperation('stm_read', [op.args[0]], v_none)) - transformer.read_barrier_counts += 1 + if not stm_ignored: + v_none = varoftype(lltype.Void) + newops.append(SpaceOperation('stm_read', + [op.args[0]], v_none)) + transformer.read_barrier_counts += 1 + elif op.opname == 'stm_ignored_start': + assert stm_ignored == False + stm_ignored = True + elif op.opname == 'stm_ignored_stop': + assert stm_ignored == True + stm_ignored = False newops.append(op) + assert stm_ignored == False block.operations = newops diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py --- a/rpython/translator/stm/test/test_readbarrier.py +++ b/rpython/translator/stm/test/test_readbarrier.py @@ -1,3 +1,4 @@ +from rpython.rlib.objectmodel import stm_ignored from rpython.translator.stm.test.transform_support import BaseTestTransform from rpython.rtyper.lltypesystem import lltype @@ -24,3 +25,14 @@ res = self.interpret(f1, [-5]) assert res == 42 assert self.read_barriers == [x1] + + def test_stm_ignored_read(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 42 + def f1(): + with stm_ignored: + return x1.foo + res = self.interpret(f1, []) + assert res == 42 + assert 
self.read_barriers == [] diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -395,3 +395,27 @@ lines = dataerr.split('\n') assert lines[0] == ' 0.400000' assert lines[1] == ' 1.200000' + + def test_stm_ignored(self): + class X: + foo = 84 + prebuilt = X() + prebuilt2 = X() + def main(argv): + with objectmodel.stm_ignored: + prebuilt.foo = 42 + with objectmodel.stm_ignored: + x = prebuilt2.foo + print 'did not crash', x + return 0 + + t, cbuilder = self.compile(main) + opnames = [op.opname for op in t.graphs[0].startblock.operations] + assert opnames[:6] == ['stm_ignored_start', + 'bare_setfield', # with no stm_write + 'stm_ignored_stop', + 'stm_ignored_start', + 'getfield', # with no stm_read + 'stm_ignored_stop'] + data = cbuilder.cmdexec('') + assert 'did not crash 84\n' in data diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -87,19 +87,18 @@ self.op_stm_read(obj) # implicitly counts as a read barrier too def op_stm_ignored_start(self): - xxx assert self.stm_ignored == False self.stm_ignored = True def op_stm_ignored_stop(self): - xxx assert self.stm_ignored == True self.stm_ignored = False def op_getfield(self, obj, field): if obj._TYPE.TO._gckind == 'gc': if obj._TYPE.TO._immutable_field(field): - self.gcptrs_actually_read.append(obj) + if not self.stm_ignored: + self.gcptrs_actually_read.append(obj) return LLFrame.op_getfield(self, obj, field) def op_setfield(self, obj, fieldname, fieldvalue): From noreply at buildbot.pypy.org Wed Mar 12 11:08:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 11:08:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixes Message-ID: <20140312100853.3E5891C0460@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69893:c6329f85e8bc Date: 2014-03-12 11:08 +0100 http://bitbucket.org/pypy/pypy/changeset/c6329f85e8bc/ Log: Fixes diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -64,7 +64,8 @@ self.instrument_ncounter = 0 - self.with_stm = self.translator.config.translation.stm + self.with_stm = (self.translator is not None and + self.translator.config.translation.stm) self.prebuilt_gc_counter = 0 def gettypedefnode(self, T, varlength=None): diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py --- a/rpython/translator/stm/jitdriver.py +++ b/rpython/translator/stm/jitdriver.py @@ -148,7 +148,6 @@ def make_invoke_stm_function(self): CONTAINER = self.CONTAINER callback = self.callback_function - XXX perform_transaction = rstm.make_perform_transaction(callback, self.CONTAINERP) irange = range(len(self.TYPES)) From noreply at buildbot.pypy.org Wed Mar 12 11:27:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 11:27:15 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: "Support" the stm version of thread._local in the absence of rweakref. 
Message-ID: <20140312102715.21D801C3058@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69894:84fac834f29b Date: 2014-03-12 11:25 +0100 http://bitbucket.org/pypy/pypy/changeset/84fac834f29b/ Log: "Support" the stm version of thread._local in the absence of rweakref. diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -9,16 +9,30 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, descr_get_dict from rpython.rlib import rthread from rpython.rlib import rstm -from rpython.rlib import rweakref from rpython.rlib import jit from rpython.rlib.objectmodel import invoke_around_extcall, we_are_translated +class FakeWeakKeyDictionary: + # Only used if we don't have weakrefs. + # Then thread._local instances will leak, but too bad. + def __init__(self): + self.d = {} + def get(self, key): + return self.d.get(key, None) + def set(self, key, value): + self.d[key] = value + + ec_cache = rstm.ThreadLocalReference(ExecutionContext) def initialize_execution_context(ec): """Called from ExecutionContext.__init__().""" - ec._thread_local_dicts = rweakref.RWeakKeyDictionary(STMLocal, W_Root) + if ec.space.config.translation.rweakref: + from rpython.rlib import rweakref + ec._thread_local_dicts = rweakref.RWeakKeyDictionary(STMLocal, W_Root) + else: + ec._thread_local_dicts = FakeWeakKeyDictionary() if ec.space.config.objspace.std.withmethodcache: from pypy.objspace.std.typeobject import MethodCache ec._methodcache = MethodCache(ec.space) From noreply at buildbot.pypy.org Wed Mar 12 11:47:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 11:47:30 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Test and fix for inf/nan values in prebuilt structures Message-ID: <20140312104730.8A9C81C31B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69895:8384a52fc3af Date: 2014-03-12 11:46 +0100 http://bitbucket.org/pypy/pypy/changeset/8384a52fc3af/ Log: Test and fix for inf/nan values in prebuilt structures diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -824,6 +824,11 @@ print >> f, 'char *RPython_StartupCode(void) {' print >> f, '\tchar *error = NULL;' + # put float infinities in global constants, we should not have so many of them for now to make + # a table+loop preferable + for dest, value in database.late_initializations: + print >> f, "\t%s = %s;" % (dest, value) + if database.with_stm: print >> f, '\tpypy_stm_setup();' print >> f, '\tpypy_stm_setup_prebuilt();' @@ -831,11 +836,6 @@ for line in database.gcpolicy.gc_startup_code(): print >> f,"\t" + line - # put float infinities in global constants, we should not have so many of them for now to make - # a table+loop preferable - for dest, value in database.late_initializations: - print >> f, "\t%s = %s;" % (dest, value) - firsttime = True for node in database.containerlist: lines = list(node.startupcode()) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -816,7 +816,6 @@ else: comma = ',' if typeOf(value) == Float and not isfinite(value): - XXX # check and reimplement me db.late_initializations.append(('%s' % access_expr, db.get(value, static=True))) if isinf(value): diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- 
a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -419,3 +419,17 @@ 'stm_ignored_stop'] data = cbuilder.cmdexec('') assert 'did not crash 84\n' in data + + def test_float_inf_nan_in_struct(self): + mylist = [float("inf"), float("-inf"), float("nan")] + def main(argv): + print ':', mylist[int(argv[1])] + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('0') + assert ': inf\n' in data + data = cbuilder.cmdexec('1') + assert ': -inf\n' in data + data = cbuilder.cmdexec('2') + assert ': nan\n' in data From noreply at buildbot.pypy.org Wed Mar 12 12:58:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 12:58:09 +0100 (CET) Subject: [pypy-commit] pypy default: Rewrite rffi.str_from_buffer() in terms of copy_raw_to_string() Message-ID: <20140312115809.476AE1C3058@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69896:938e7328779b Date: 2014-03-12 12:11 +0100 http://bitbucket.org/pypy/pypy/changeset/938e7328779b/ Log: Rewrite rffi.str_from_buffer() in terms of copy_raw_to_string() rather than directly using raw_memcopy. diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, rstr from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr -from rpython.rtyper.lltypesystem.llmemory import itemoffsetof, raw_memcopy +from rpython.rtyper.lltypesystem.llmemory import itemoffsetof from rpython.rtyper.llannotation import lltype_to_annotation from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.objectmodel import Symbolic @@ -679,7 +679,9 @@ if strtype is str: from rpython.rtyper.lltypesystem.rstr import (STR as STRTYPE, - copy_string_to_raw) + copy_string_to_raw, + copy_raw_to_string, + copy_string_contents) from rpython.rtyper.annlowlevel import llstr as llstrtype from rpython.rtyper.annlowlevel import hlstr as hlstrtype TYPEP = CCHARP @@ -689,7 +691,9 @@ else: from rpython.rtyper.lltypesystem.rstr import ( UNICODE as STRTYPE, - copy_unicode_to_raw as copy_string_to_raw) + copy_unicode_to_raw as copy_string_to_raw, + copy_raw_to_unicode as copy_raw_to_string, + copy_unicode_contents as copy_string_contents) from rpython.rtyper.annlowlevel import llunicode as llstrtype from rpython.rtyper.annlowlevel import hlunicode as hlstrtype TYPEP = CWCHARP @@ -803,17 +807,10 @@ return hlstrtype(gc_buf) new_buf = lltype.malloc(STRTYPE, needed_size) - str_chars_offset = (offsetof(STRTYPE, 'chars') + \ - itemoffsetof(STRTYPE.chars, 0)) if gc_buf: - src = cast_ptr_to_adr(gc_buf) + str_chars_offset + copy_string_contents(gc_buf, new_buf, 0, 0, needed_size) else: - src = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0) - dest = cast_ptr_to_adr(new_buf) + str_chars_offset - raw_memcopy(src, dest, - llmemory.sizeof(ll_char_type) * needed_size) - keepalive_until_here(gc_buf) - keepalive_until_here(new_buf) + copy_raw_to_string(raw_buf, new_buf, 0, needed_size) return hlstrtype(new_buf) # (char*, str) -> None From noreply at buildbot.pypy.org Wed Mar 12 12:58:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 12:58:10 +0100 (CET) Subject: [pypy-commit] pypy default: More precision, needed for 938e7328779b Message-ID: <20140312115810.7F8821C3058@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: 
r69897:c0059044fa35 Date: 2014-03-12 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/c0059044fa35/ Log: More precision, needed for 938e7328779b diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -6,7 +6,7 @@ from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, SomeUnicodeCodePoint, SomeInteger, SomeString, SomeImpossibleValue, - s_None, s_Bool, UnionError, AnnotatorError) + s_None, s_Bool, UnionError, AnnotatorError, SomeBool) from rpython.rtyper.lltypesystem import lltype, llmemory class SomeAddress(SomeObject): @@ -155,7 +155,10 @@ return ll_to_annotation(v) def bool(self): - return s_Bool + result = SomeBool() + if self.is_constant(): + result.const = bool(self.const) + return result class SomeInteriorPtr(SomePtr): From noreply at buildbot.pypy.org Wed Mar 12 12:58:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 12:58:11 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Rewrite rffi.str_from_buffer() in terms of copy_raw_to_string() Message-ID: <20140312115811.B2B921C3058@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69898:8a8455c7eddf Date: 2014-03-12 12:11 +0100 http://bitbucket.org/pypy/pypy/changeset/8a8455c7eddf/ Log: Rewrite rffi.str_from_buffer() in terms of copy_raw_to_string() rather than directly using raw_memcopy. diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, rstr from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr -from rpython.rtyper.lltypesystem.llmemory import itemoffsetof, raw_memcopy +from rpython.rtyper.lltypesystem.llmemory import itemoffsetof from rpython.rtyper.llannotation import lltype_to_annotation from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.objectmodel import Symbolic @@ -695,7 +695,9 @@ if strtype is str: from rpython.rtyper.lltypesystem.rstr import (STR as STRTYPE, - copy_string_to_raw) + copy_string_to_raw, + copy_raw_to_string, + copy_string_contents) from rpython.rtyper.annlowlevel import llstr as llstrtype from rpython.rtyper.annlowlevel import hlstr as hlstrtype TYPEP = CCHARP @@ -705,7 +707,9 @@ else: from rpython.rtyper.lltypesystem.rstr import ( UNICODE as STRTYPE, - copy_unicode_to_raw as copy_string_to_raw) + copy_unicode_to_raw as copy_string_to_raw, + copy_raw_to_unicode as copy_raw_to_string, + copy_unicode_contents as copy_string_contents) from rpython.rtyper.annlowlevel import llunicode as llstrtype from rpython.rtyper.annlowlevel import hlunicode as hlstrtype TYPEP = CWCHARP @@ -823,17 +827,10 @@ return hlstrtype(gc_buf) new_buf = lltype.malloc(STRTYPE, needed_size) - str_chars_offset = (offsetof(STRTYPE, 'chars') + \ - itemoffsetof(STRTYPE.chars, 0)) if gc_buf: - src = cast_ptr_to_adr(gc_buf) + str_chars_offset + copy_string_contents(gc_buf, new_buf, 0, 0, needed_size) else: - src = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0) - dest = cast_ptr_to_adr(new_buf) + str_chars_offset - raw_memcopy(src, dest, - llmemory.sizeof(ll_char_type) * needed_size) - keepalive_until_here(gc_buf) - keepalive_until_here(new_buf) + copy_raw_to_string(raw_buf, new_buf, 0, needed_size) return hlstrtype(new_buf) # (char*, str) -> None From noreply at buildbot.pypy.org 
Wed Mar 12 12:58:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 12:58:12 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: More precision, needed for 938e7328779b Message-ID: <20140312115812.D4E921C3058@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69899:b333e74ef7d0 Date: 2014-03-12 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/b333e74ef7d0/ Log: More precision, needed for 938e7328779b diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -6,7 +6,7 @@ from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, SomeUnicodeCodePoint, SomeInteger, SomeString, SomeImpossibleValue, - s_None, s_Bool, UnionError, AnnotatorError) + s_None, s_Bool, UnionError, AnnotatorError, SomeBool) from rpython.rtyper.lltypesystem import lltype, llmemory class SomeAddress(SomeObject): @@ -155,7 +155,10 @@ return ll_to_annotation(v) def bool(self): - return s_Bool + result = SomeBool() + if self.is_constant(): + result.const = bool(self.const) + return result class SomeInteriorPtr(SomePtr): From noreply at buildbot.pypy.org Wed Mar 12 12:58:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 12:58:14 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Temporary disable inspector.py Message-ID: <20140312115814.0861E1C3058@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69900:3d9b35f2c84a Date: 2014-03-12 12:57 +0100 http://bitbucket.org/pypy/pypy/changeset/3d9b35f2c84a/ Log: Temporary disable inspector.py diff --git a/rpython/memory/gc/inspector.py b/rpython/memory/gc/inspector.py --- a/rpython/memory/gc/inspector.py +++ b/rpython/memory/gc/inspector.py @@ -36,6 +36,7 @@ gc._list_rpy = None def get_rpy_roots(gc): + assert 0 # XXX XXX STM TEMP count = _do_count_rpy_roots(gc) extra = 16 while True: @@ -74,6 +75,7 @@ gc.trace(llmemory.cast_ptr_to_adr(gcref), _append_rpy_referent, gc) def get_rpy_referents(gc, gcref): + assert 0 # XXX XXX STM TEMP count = _do_count_rpy_referents(gc, gcref) result = [lltype.nullptr(llmemory.GCREF.TO)] * count _do_append_rpy_referents(gc, gcref, result) @@ -82,13 +84,16 @@ # ---------- def get_rpy_memory_usage(gc, gcref): + assert 0 # XXX XXX STM TEMP return gc.get_size_incl_hash(llmemory.cast_ptr_to_adr(gcref)) def get_rpy_type_index(gc, gcref): + assert 0 # XXX XXX STM TEMP typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref)) return gc.get_member_index(typeid) def is_rpy_instance(gc, gcref): + assert 0 # XXX XXX STM TEMP typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref)) return gc.is_rpython_class(typeid) From noreply at buildbot.pypy.org Wed Mar 12 16:35:24 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 16:35:24 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: start adding weakref support Message-ID: <20140312153524.7965A1C301F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r979:3040d781125a Date: 2014-03-12 16:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/3040d781125a/ Log: start adding weakref support diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -180,6 +180,7 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); 
assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -54,6 +54,11 @@ after the object. */ GCFLAG_HAS_SHADOW = 0x04, + /* This flag is set on weakref objects. Weakref objects have a + reference to the referenced object at the byte-offset + stmcb_size_rounded_up(obj) - sizeof(void*) */ + GCFLAG_WEAKREF = 0x08, + /* All remaining bits of the 32-bit 'stm_flags' field are taken by the "overflow number". This is a number that identifies the "overflow objects" from the current transaction among all old @@ -61,7 +66,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. */ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x08 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x10 /* must be last */ }; @@ -105,6 +110,14 @@ next minor collection. */ struct tree_s *nursery_objects_shadows; + /* List of all young weakrefs to check in minor collections. These + are the only weakrefs that may point to young objects. */ + struct list_s *young_weakrefs; + + /* List of all old weakrefs to check in major collections. These + weakrefs never point to young objects */ + struct list_s *old_weakrefs; + /* Tree of 'key->callback' associations from stm_call_on_abort() */ struct tree_s *callbacks_on_abort; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -299,6 +299,9 @@ collect_oldrefs_to_nursery(); + /* now all surviving nursery objects have been moved out */ + stm_move_young_weakrefs(); + throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num)); assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -57,6 +57,8 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->young_weakrefs = list_create(); + pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_abort = tree_create(); @@ -95,6 +97,8 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->young_weakrefs); + list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); tree_free(pr->callbacks_on_abort); diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c new file mode 100644 --- /dev/null +++ b/c7/stm/weakref.c @@ -0,0 +1,226 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +object_t *stm_allocate_weakref(ssize_t size_rounded_up) +{ + OPT_ASSERT(size_rounded_up > sizeof(struct object_s)); + object_t *obj = stm_allocate(size_rounded_up); + obj->stm_flags |= GCFLAG_WEAKREF; + LIST_APPEND(STM_PSEGMENT->young_weakrefs, obj); + return obj; +} + + +/***** Minor collection *****/ + +void stm_move_young_weakrefs() +{ + /* The code relies on the fact that no weakref can be an old object + weakly pointing to a young object. Indeed, weakrefs are immutable + so they cannot point to an object that was created after it. 
+ */ + LIST_FOREACH_R( + STM_PSEGMENT->young_weakrefs, + object_t * /*item*/, + ({ + if (_is_in_nursery(item)) { + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + + /* the following checks are done like in nursery.c: */ + if (!(item->stm_flags & GCFLAG_HAS_SHADOW) + || (pforwarded_array[0] != GCWORD_MOVED)) { + /* weakref dies */ + continue; + } + + item = pforwarded_array[1]; /* moved location */ + } + else { + /* young outside nursery object */ + if (tree_contains(STM_PSEGMENT->young_outside_nursery, + (uintptr_t)item)) { + /* still in the tree -> wasn't seen by the minor collection, + so it doesn't survive */ + continue; + } + } + assert(!_is_young(item)); + + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, item); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + object_t *pointing_to = *WEAKREF_PTR(item, size); + assert(pointing_to != NULL); + + if (_is_in_nursery(pointing_to)) { + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; + /* the following checks are done like in nursery.c: */ + if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) + || (pforwarded_array[0] != GCWORD_MOVED)) { + /* pointing_to dies */ + *WEAKREF_PTR(item, size) = NULL; + continue; /* no need to remember in old_weakrefs */ + } + else { + /* moved location */ + *WEAKREF_PTR(item, size) = pforwarded_array[1]; + } + } + else { + /* young outside nursery object or old object */ + if (tree_contains(STM_PSEGMENT->young_outside_nursery, + (uintptr_t)pointing_to)) { + /* still in the tree -> wasn't seen by the minor collection, + so it doesn't survive */ + *WEAKREF_PTR(item, size) = NULL; + continue; /* no need to remember in old_weakrefs */ + } + /* pointing_to was already old */ + } + LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); + })); + list_clear(STM_PSEGMENT->young_weakrefs); +} + + +/***** Major collection *****/ + +/* static _Bool is_partially_visited(gcptr obj) */ +/* { */ +/* /\* Based on gcpage.c:visit_public(). Check the code here if we change */ +/* visit_public(). Returns True or False depending on whether we find any */ +/* version of 'obj' to be MARKED or not. */ +/* *\/ */ +/* assert(IMPLIES(obj->h_tid & GCFLAG_VISITED, */ +/* obj->h_tid & GCFLAG_MARKED)); */ +/* if (obj->h_tid & GCFLAG_MARKED) */ +/* return 1; */ + +/* /\* if (!(obj->h_tid & GCFLAG_PUBLIC)) *\/ */ +/* /\* return 0; *\/ */ +/* assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); */ +/* if (obj->h_original != 0) { */ +/* gcptr original = (gcptr)obj->h_original; */ +/* assert(IMPLIES(original->h_tid & GCFLAG_VISITED, */ +/* original->h_tid & GCFLAG_MARKED)); */ +/* if (original->h_tid & GCFLAG_MARKED) */ +/* return 1; */ +/* } */ +/* return 0; */ +/* } */ + +/* static void update_old_weakrefs_list(struct tx_public_descriptor *gcp) */ +/* { */ +/* long i, size = gcp->old_weakrefs.size; */ +/* gcptr *items = gcp->old_weakrefs.items; */ + +/* for (i = 0; i < size; i++) { */ +/* gcptr weakref = items[i]; */ + +/* /\* if a weakref moved, update its position in the list *\/ */ +/* if (weakref->h_tid & GCFLAG_MOVED) { */ +/* items[i] = (gcptr)weakref->h_original; */ +/* } */ +/* } */ +/* } */ + +/* static void visit_old_weakrefs(struct tx_public_descriptor *gcp) */ +/* { */ +/* /\* Note: it's possible that a weakref points to a public stub to a */ +/* protected object, and only the protected object was marked as */ +/* VISITED so far. In this case, this function needs to mark the */ +/* public stub as VISITED too. 
*/ +/* *\/ */ +/* long i, size = gcp->old_weakrefs.size; */ +/* gcptr *items = gcp->old_weakrefs.items; */ + +/* for (i = 0; i < size; i++) { */ +/* gcptr weakref = items[i]; */ + +/* if (!(weakref->h_tid & GCFLAG_VISITED)) { */ +/* /\* the weakref itself dies *\/ */ +/* } */ +/* else { */ +/* /\* the weakref belongs to our thread, therefore we should */ +/* always see the most current revision here: *\/ */ +/* assert(weakref->h_revision & 1); */ + +/* size_t size = stmgc_size(weakref); */ +/* gcptr pointing_to = *WEAKREF_PTR(weakref, size); */ +/* assert(pointing_to != NULL); */ +/* if (is_partially_visited(pointing_to)) { */ +/* pointing_to = stmgcpage_visit(pointing_to); */ +/* dprintf(("mweakref ptr moved %p->%p\n", */ +/* *WEAKREF_PTR(weakref, size), */ +/* pointing_to)); */ + +/* assert(pointing_to->h_tid & GCFLAG_VISITED); */ +/* *WEAKREF_PTR(weakref, size) = pointing_to; */ +/* } */ +/* else { */ +/* /\* the weakref appears to be pointing to a dying object, */ +/* but we don't know for sure now. Clearing it is left */ +/* to clean_old_weakrefs(). *\/ */ +/* } */ +/* } */ +/* } */ +/* } */ + +/* static void clean_old_weakrefs(struct tx_public_descriptor *gcp) */ +/* { */ +/* long i, size = gcp->old_weakrefs.size; */ +/* gcptr *items = gcp->old_weakrefs.items; */ + +/* for (i = size - 1; i >= 0; i--) { */ +/* gcptr weakref = items[i]; */ +/* assert(weakref->h_revision & 1); */ +/* if (weakref->h_tid & GCFLAG_VISITED) { */ +/* size_t size = stmgc_size(weakref); */ +/* gcptr pointing_to = *WEAKREF_PTR(weakref, size); */ +/* if (pointing_to->h_tid & GCFLAG_VISITED) { */ +/* continue; /\* the target stays alive, the weakref remains *\/ */ +/* } */ +/* dprintf(("mweakref lost ptr %p\n", *WEAKREF_PTR(weakref, size))); */ +/* *WEAKREF_PTR(weakref, size) = NULL; /\* the target dies *\/ */ +/* } */ +/* /\* remove this weakref from the list *\/ */ +/* items[i] = items[--gcp->old_weakrefs.size]; */ +/* } */ +/* gcptrlist_compress(&gcp->old_weakrefs); */ +/* } */ + +/* static void for_each_public_descriptor( */ +/* void visit(struct tx_public_descriptor *)) { */ +/* struct tx_descriptor *d; */ +/* for (d = stm_tx_head; d; d = d->tx_next) */ +/* visit(d->public_descriptor); */ + +/* struct tx_public_descriptor *gcp; */ +/* revision_t index = -1; */ +/* while ((gcp = stm_get_free_public_descriptor(&index)) != NULL) */ +/* visit(gcp); */ +/* } */ + +/* void stm_update_old_weakrefs_lists(void) */ +/* { */ +/* /\* go over old weakrefs lists and update the list with possibly */ +/* new pointers because of copy_over_original *\/ */ +/* for_each_public_descriptor(update_old_weakrefs_list); */ +/* } */ + + +/* void stm_visit_old_weakrefs(void) */ +/* { */ +/* /\* Figure out which weakrefs survive, which possibly */ +/* adds more objects to 'objects_to_trace'. 
*/ +/* *\/ */ +/* for_each_public_descriptor(visit_old_weakrefs); */ +/* } */ + +/* void stm_clean_old_weakrefs(void) */ +/* { */ +/* /\* Clean up the non-surviving weakrefs */ +/* *\/ */ +/* for_each_public_descriptor(clean_old_weakrefs); */ +/* } */ diff --git a/c7/stm/weakref.h b/c7/stm/weakref.h new file mode 100644 --- /dev/null +++ b/c7/stm/weakref.h @@ -0,0 +1,13 @@ +#ifndef _SRCSTM_WEAKREF_H +#define _SRCSTM_WEAKREF_H + + +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) + +void stm_move_young_weakrefs(void); +/* void stm_update_old_weakrefs_lists(void); */ +/* void stm_visit_old_weakrefs(void); */ +/* void stm_clean_old_weakrefs(void); */ + + +#endif diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -12,6 +12,7 @@ #include "stm/contention.h" #include "stm/extra.h" #include "stm/fprintcolor.h" +#include "stm/weakref.h" #include "stm/misc.c" #include "stm/list.c" @@ -28,3 +29,4 @@ #include "stm/contention.c" #include "stm/extra.c" #include "stm/fprintcolor.c" +#include "stm/weakref.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -194,6 +194,14 @@ return (object_t *)p; } +/* Allocate a weakref object. Weakref objects have a + reference to the referenced object at the byte-offset + stmcb_size_rounded_up(obj) - sizeof(void*) + This reference becomes NULL if the referenced object was freed. + You must assign the reference before the next collection may happen. + After that, they may be considered immutable. */ +object_t *stm_allocate_weakref(ssize_t size_rounded_up); + /* stm_setup() needs to be called once at the beginning of the program. stm_teardown() can be called at the end, but that's not necessary diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -25,6 +25,7 @@ void stm_read(object_t *obj); /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); +object_t *stm_allocate_weakref(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); void stm_setup(void); @@ -54,6 +55,10 @@ void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); +void _set_weakref(object_t *obj, object_t *v); +object_t* _get_weakref(object_t *obj); + + void _stm_start_safe_point(void); bool _check_stop_safe_point(void); @@ -163,6 +168,21 @@ } +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) +void _set_weakref(object_t *obj, object_t *v) +{ + char *realobj = _stm_real_address(obj); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + *WEAKREF_PTR(obj, size) = v; +} + +object_t * _get_weakref(object_t *obj) +{ + char *realobj = _stm_real_address(obj); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + return *WEAKREF_PTR(obj, size); +} + void _set_ptr(object_t *obj, int n, object_t *v) { long nrefs = (long)((myobj_t*)obj)->type_id - 421420; @@ -266,6 +286,16 @@ lib._set_type_id(o, tid) return o +def stm_allocate_weakref(point_to_obj): + o = lib.stm_allocate(HDR + WORD) + tid = 421420 + lib._set_type_id(o, tid) + lib._set_weakref(o, point_to_obj) + return o + +def stm_get_weakref(o): + return lib._get_weakref(o) + def stm_allocate_refs(n): o = lib.stm_allocate(HDR + n * WORD) tid = 421420 + n diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py new file mode 100644 --- /dev/null +++ b/c7/test/test_weakref.py @@ -0,0 +1,221 @@ +import py +from support 
import * + + + + +class TestMinorCollection(BaseTest): + def test_simple(self): + lib._stm_set_nursery_free_count(2048) + self.start_transaction() + + self.push_root_no_gc() + lp1 = stm_allocate_weakref(ffi.NULL) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == ffi.NULL + + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + + assert stm_get_weakref(lp1) == ffi.NULL + + # def test_weakref_invalidate(self): + # p2 = nalloc(HDR) + # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE | GCFLAG_WEAKREF + # assert p1.h_revision == lib.get_private_rev_num() + # assert lib.rawgetptr(p1, 0) == p2 + # lib.stm_push_root(p1) + # minor_collect() + # p1 = lib.stm_pop_root() + # assert lib.rawgetptr(p1, 0) == ffi.NULL + + # def test_weakref_itself_dies(self): + # p2 = nalloc(HDR) + # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # minor_collect() + + # def test_weakref_keep(self): + # p2 = nalloc(HDR) + # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE | GCFLAG_WEAKREF + # assert p1.h_revision == lib.get_private_rev_num() + # assert lib.rawgetptr(p1, 0) == p2 + # lib.stm_push_root(p1) + # lib.stm_push_root(p2) + # minor_collect() + # p2 = lib.stm_pop_root() + # p1 = lib.stm_pop_root() + # assert lib.rawgetptr(p1, 0) == p2 + + # def test_weakref_old_keep(self): + # p2 = oalloc(HDR) + # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE | GCFLAG_WEAKREF + # assert p1.h_revision == lib.get_private_rev_num() + # assert lib.rawgetptr(p1, 0) == p2 + # lib.stm_push_root(p1) + # lib.stm_push_root(p2) + # minor_collect() + # p2 = lib.stm_pop_root() + # p1 = lib.stm_pop_root() + # assert lib.rawgetptr(p1, 0) == p2 + + + # def test_old_private_not_keep_alive_weakref(self): + # p = palloc(HDR + WORD) + # q = palloc_refs(1) + + # def f1(c): + # if c == 1: + # # currently fails because: + # # p1 still in old_objects_to_trace + # # -> keeps alive weakp1w + # # -> stm_move_young_weakrefs() sees a weakref pointing + # # to an aborted object + # minor_collect() + # return + + # # allocate the "container" as old, private q1 + # q1 = lib.stm_write_barrier(q) + # assert classify(q1) == "private" + # lib.stm_push_root(q1) + # minor_collect() + # q1 = lib.stm_pop_root() + # assert classify(q1) == "private" + # assert q1.h_tid & GCFLAG_OLD + # assert q1.h_tid & GCFLAG_WRITE_BARRIER + + # # allocate young private p1 to point to + # p1 = lib.stm_write_barrier(p) + # assert ffi.cast("gcptr", p1.h_original) == p + # assert classify(p1) == "private" + # assert not (p1.h_tid & GCFLAG_OLD) + + # lib.stm_push_root(p1) + # lib.stm_push_root(q1) + # weakp1w = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p1) + # q1 = lib.stm_pop_root() + # p1 = lib.stm_pop_root() + # # q1 still old, p1 still young, weakp1w also young + + # q1w = lib.stm_write_barrier(q1) + # # add q1 to old_objects_to_trace + # assert q1 == q1w # was and is private + # lib.rawsetptr(q1, 0, weakp1w) + + # abort_and_retry() + + # perform_transaction(f1) + + + + + + + + + + +# class TestMajorCollection(BaseTest): + +# def test_weakref_old(self): +# p2 = nalloc(HDR) +# p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) +# # +# lib.stm_push_root(p1) +# lib.stm_push_root(p2) +# major_collect() +# p2 = lib.stm_pop_root() +# p1 = lib.stm_pop_root() +# assert lib.rawgetptr(p1, 0) == p2 +# # +# lib.stm_push_root(p1) 
+# major_collect() +# p1 = lib.stm_pop_root() +# assert lib.rawgetptr(p1, 0) == ffi.NULL + +# def test_weakref_to_prebuilt(self): +# p2 = palloc(HDR) +# p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) +# # +# lib.stm_push_root(p1) +# major_collect() +# p1 = lib.stm_pop_root() +# assert lib.rawgetptr(p1, 0) == p2 + +# def test_weakref_update_version(self): +# p2 = oalloc(HDR + WORD); make_public(p2) +# p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) +# # +# lib.stm_push_root(p1) +# lib.stm_push_root(p2) +# major_collect() +# p2 = lib.stm_pop_root() +# p1 = lib.stm_pop_root() +# assert lib.rawgetptr(p1, 0) == p2 +# # +# lib.stm_commit_transaction() +# lib.stm_begin_inevitable_transaction() +# # +# lib.setlong(p2, 0, 912809218) # write barrier +# assert lib.rawgetlong(p2, 0) == 0 +# lib.stm_push_root(p1) +# lib.stm_push_root(p2) +# major_collect() +# p2 = lib.stm_pop_root() +# p1 = lib.stm_pop_root() +# assert lib.rawgetptr(p1, 0) == p2 +# assert lib.rawgetlong(p2, 0) == 0 +# # +# lib.stm_commit_transaction() +# lib.stm_begin_inevitable_transaction() +# # +# assert lib.rawgetptr(p1, 0) == p2 +# assert lib.rawgetlong(p2, 0) == 0 +# lib.stm_push_root(p1) +# lib.stm_push_root(p2) +# major_collect() +# p2b = lib.stm_pop_root() +# p1 = lib.stm_pop_root() +# assert lib.rawgetptr(p1, 0) == p2 +# assert p2b != p2 +# assert lib.getlong(p2b, 0) == 912809218 +# assert lib.getlong(p2, 0) == 912809218 + + +# def test_stealing(self): +# p = palloc_refs(1) +# u = palloc_refs(1) + +# def f1(r): +# q = nalloc(HDR+WORD) +# # lib.stm_push_root(q) +# w = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, q) +# # q = lib.stm_pop_root() +# setptr(p, 0, w) +# setptr(u, 0, q) +# minor_collect() +# lib.stm_commit_transaction() +# lib.stm_begin_inevitable_transaction() +# r.set(2) +# r.wait(3) +# print "happy" + +# def f2(r): +# r.wait(2) +# # steal p, should stub the weakref contained in it +# pr = lib.stm_read_barrier(p) +# w = rawgetptr(pr, 0) +# assert classify(w) == "stub" + +# # read weakref, should stub out weakptr +# wr = lib.stm_read_barrier(w) +# assert wr.h_tid & GCFLAG_WEAKREF +# assert classify(lib.rawgetptr(wr, 0)) == "stub" + +# r.set(3) + +# run_parallel(f1, f2) From noreply at buildbot.pypy.org Wed Mar 12 16:35:25 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 16:35:25 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: fix Message-ID: <20140312153525.94B801C301F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r980:f10f5aecb970 Date: 2014-03-12 16:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/f10f5aecb970/ Log: fix diff --git a/c7/test/common.py b/c7/test/common.py --- a/c7/test/common.py +++ b/c7/test/common.py @@ -14,7 +14,7 @@ os.path.join(parent_dir, "stmgc.c")] + [ os.path.join(parent_dir, 'stm', _n) for _n in os.listdir(os.path.join(parent_dir, 'stm')) - if _n.endswith('.h') or _n.endswith('.c')] + if (_n.endswith('.h') or _n.endswith('.c')) and not _n.startswith('.')] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -287,7 +287,7 @@ return o def stm_allocate_weakref(point_to_obj): - o = lib.stm_allocate(HDR + WORD) + o = lib.stm_allocate_weakref(HDR + WORD) tid = 421420 lib._set_type_id(o, tid) lib._set_weakref(o, point_to_obj) diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -10,16 
+10,36 @@ self.start_transaction() self.push_root_no_gc() - lp1 = stm_allocate_weakref(ffi.NULL) # no collection here + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here self.pop_root() - assert stm_get_weakref(lp1) == ffi.NULL + assert stm_get_weakref(lp1) == lp2 self.push_root(lp1) stm_minor_collect() lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL - assert stm_get_weakref(lp1) == ffi.NULL + def test_still_simple(self): + lib._stm_set_nursery_free_count(2048) + self.start_transaction() + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 # def test_weakref_invalidate(self): # p2 = nalloc(HDR) From noreply at buildbot.pypy.org Wed Mar 12 16:35:26 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 16:35:26 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: more tests Message-ID: <20140312153526.B1A5D1C301F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r981:43a1c14eaaa6 Date: 2014-03-12 16:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/43a1c14eaaa6/ Log: more tests diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -22,6 +22,12 @@ # lp2 died assert stm_get_weakref(lp1) == ffi.NULL + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL + def test_still_simple(self): lib._stm_set_nursery_free_count(2048) self.start_transaction() @@ -41,94 +47,39 @@ # lp2 survived assert stm_get_weakref(lp1) == lp2 - # def test_weakref_invalidate(self): - # p2 = nalloc(HDR) - # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - # assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE | GCFLAG_WEAKREF - # assert p1.h_revision == lib.get_private_rev_num() - # assert lib.rawgetptr(p1, 0) == p2 - # lib.stm_push_root(p1) - # minor_collect() - # p1 = lib.stm_pop_root() - # assert lib.rawgetptr(p1, 0) == ffi.NULL + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 - # def test_weakref_itself_dies(self): - # p2 = nalloc(HDR) - # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - # minor_collect() + def test_weakref_itself_dies(self): + self.start_transaction() - # def test_weakref_keep(self): - # p2 = nalloc(HDR) - # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - # assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE | GCFLAG_WEAKREF - # assert p1.h_revision == lib.get_private_rev_num() - # assert lib.rawgetptr(p1, 0) == p2 - # lib.stm_push_root(p1) - # lib.stm_push_root(p2) - # minor_collect() - # p2 = lib.stm_pop_root() - # p1 = lib.stm_pop_root() - # assert lib.rawgetptr(p1, 0) == p2 + self.push_root_no_gc() + lp2 = stm_allocate(48) + stm_allocate_weakref(lp2) # no collection here + self.pop_root() + stm_minor_collect() + assert lib._stm_total_allocated() == 0 - # def test_weakref_old_keep(self): - # p2 = oalloc(HDR) - # p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - # assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE | GCFLAG_WEAKREF - # assert p1.h_revision == lib.get_private_rev_num() - # assert lib.rawgetptr(p1, 0) 
== p2 - # lib.stm_push_root(p1) - # lib.stm_push_root(p2) - # minor_collect() - # p2 = lib.stm_pop_root() - # p1 = lib.stm_pop_root() - # assert lib.rawgetptr(p1, 0) == p2 + def test_weakref_old_keep(self): + lp0 = stm_allocate_old(48) - # def test_old_private_not_keep_alive_weakref(self): - # p = palloc(HDR + WORD) - # q = palloc_refs(1) + self.start_transaction() + self.push_root_no_gc() + lp1 = stm_allocate_weakref(lp0) # no collection here + self.pop_root() - # def f1(c): - # if c == 1: - # # currently fails because: - # # p1 still in old_objects_to_trace - # # -> keeps alive weakp1w - # # -> stm_move_young_weakrefs() sees a weakref pointing - # # to an aborted object - # minor_collect() - # return + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() - # # allocate the "container" as old, private q1 - # q1 = lib.stm_write_barrier(q) - # assert classify(q1) == "private" - # lib.stm_push_root(q1) - # minor_collect() - # q1 = lib.stm_pop_root() - # assert classify(q1) == "private" - # assert q1.h_tid & GCFLAG_OLD - # assert q1.h_tid & GCFLAG_WRITE_BARRIER + assert stm_get_weakref(lp1) == lp0 - # # allocate young private p1 to point to - # p1 = lib.stm_write_barrier(p) - # assert ffi.cast("gcptr", p1.h_original) == p - # assert classify(p1) == "private" - # assert not (p1.h_tid & GCFLAG_OLD) - - # lib.stm_push_root(p1) - # lib.stm_push_root(q1) - # weakp1w = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p1) - # q1 = lib.stm_pop_root() - # p1 = lib.stm_pop_root() - # # q1 still old, p1 still young, weakp1w also young - - # q1w = lib.stm_write_barrier(q1) - # # add q1 to old_objects_to_trace - # assert q1 == q1w # was and is private - # lib.rawsetptr(q1, 0, weakp1w) - - # abort_and_retry() - - # perform_transaction(f1) From noreply at buildbot.pypy.org Wed Mar 12 16:53:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 16:53:03 +0100 (CET) Subject: [pypy-commit] stmgc default: This patch to LLVM seems to get me farther. Message-ID: <20140312155303.DE5D21C301F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r982:a58bb2fa06ad Date: 2014-03-12 16:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/a58bb2fa06ad/ Log: This patch to LLVM seems to get me farther. diff --git a/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff new file mode 100644 --- /dev/null +++ b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff @@ -0,0 +1,22 @@ +Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +=================================================================== +--- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (revision 199602) ++++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (working copy) +@@ -295,6 +295,17 @@ + + Type *SrcPTy = SrcTy->getElementType(); + ++ // XXX note that we might end up with a bogus cast: if the original ++ // cast in 'load (cast P)' is between "foo addrspace1 **" and "foo ++ // addrspace2 **", then we cannot re-express the two operations as ++ // 'cast (load P)' because that would be casting a "foo addrspace1 *" ++ // to "foo addrspace2 *". While nothing is really wrong about that ++ // cast, llvm forbids it even in internally-generated operations. 
++ if (SrcPTy->isPointerTy() && DestPTy->isPointerTy() && ++ cast(DestPTy)->getAddressSpace() != ++ cast(SrcPTy)->getAddressSpace()) ++ return 0; ++ + if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() || + DestPTy->isVectorTy()) { + // If the source is an array, the code below will not succeed. Check to From noreply at buildbot.pypy.org Wed Mar 12 17:18:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 17:18:49 +0100 (CET) Subject: [pypy-commit] stmgc default: Support prebuilt objects that are only aligned on multiples of 4 bytes Message-ID: <20140312161849.649F01C301F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r983:c6ed145863b4 Date: 2014-03-12 17:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/c6ed145863b4/ Log: Support prebuilt objects that are only aligned on multiples of 4 bytes as static data. diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -16,9 +16,13 @@ return; /* If the object was already moved, it is stored in 'tree_prebuilt_objs'. + For now we use this dictionary, with keys being equal to the double + of the numeric address of the prebuilt object. We double them in + order to support addresses that are only 4-byte-aligned in the + static data. */ wlog_t *item; - TREE_FIND(*tree_prebuilt_objs, (uintptr_t)obj, item, goto not_found); + TREE_FIND(*tree_prebuilt_objs, 2 * (uintptr_t)obj, item, goto not_found); *pstaticobj_invalid = (object_t *)item->val; /* already moved */ return; @@ -38,7 +42,7 @@ nobj->stm_flags = GCFLAG_WRITE_BARRIER; /* Add the object to the tree */ - tree_insert(tree_prebuilt_objs, (uintptr_t)obj, (uintptr_t)nobj); + tree_insert(tree_prebuilt_objs, 2 * (uintptr_t)obj, (uintptr_t)nobj); /* Done */ *pstaticobj_invalid = nobj; diff --git a/c7/test/test_prebuilt.py b/c7/test/test_prebuilt.py --- a/c7/test/test_prebuilt.py +++ b/c7/test/test_prebuilt.py @@ -83,3 +83,17 @@ def test_multiple_calls_to_stm_setup_prebuilt_2(self): self.test_multiple_calls_to_stm_setup_prebuilt_1(reverse=True) + + def test_prebuilt_align_4_byte(self): + static0 = prebuilt(16) + p0 = ffi.cast("char *", static0) + for i in reversed(range(12)): + p0[i + 4] = p0[i] + static1 = ffi.cast("object_t *", p0 + 4) + ffi.cast("char *", static1)[8:11] = 'ABC' + lp = lib.stm_setup_prebuilt(static1) + # + self.start_transaction() + assert stm_get_char(lp, 8) == 'A' + assert stm_get_char(lp, 9) == 'B' + assert stm_get_char(lp, 10) == 'C' From noreply at buildbot.pypy.org Wed Mar 12 17:20:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 17:20:08 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Another static=True missing Message-ID: <20140312162008.C88EF1C301F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69901:8a001c675c7f Date: 2014-03-12 17:18 +0100 http://bitbucket.org/pypy/pypy/changeset/8a001c675c7f/ Log: Another static=True missing diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -7,7 +7,7 @@ from rpython.rtyper.lltypesystem.rffi import CConstant from rpython.rtyper.lltypesystem import llgroup from rpython.tool.sourcetools import valid_identifier -from rpython.translator.c.primitive import PrimitiveName, PrimitiveType +from rpython.translator.c.primitive import PrimitiveName, PrimitiveType, name_gcref from rpython.translator.c.node import StructDefNode, ArrayDefNode from rpython.translator.c.node import 
FixedSizeArrayDefNode, BareBoneArrayDefNode from rpython.translator.c.node import ContainerNodeFactory, ExtTypeOpaqueDefNode @@ -183,8 +183,10 @@ if isinstance(obj, CConstant): return obj.c_name # without further checks T = typeOf(obj) - if isinstance(T, Primitive) or T == GCREF: + if isinstance(T, Primitive): return PrimitiveName[T](obj, self) + elif T == GCREF: + return name_gcref(obj, self, static=static) elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and T.TO.hints.get('c_pointer_typedef') is not None): diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -174,7 +174,7 @@ else: return 'NULL' -def name_gcref(value, db): +def name_gcref(value, db, static=False): if value: obj = value._obj if isinstance(obj, int): @@ -184,7 +184,7 @@ if isinstance(realobj, int): return _name_tagged(realobj, db) realvalue = cast_opaque_ptr(Ptr(typeOf(realobj)), value) - return db.get(realvalue) + return db.get(realvalue, static=static) else: return 'NULL' From noreply at buildbot.pypy.org Wed Mar 12 17:20:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 17:20:10 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/c6ed145863b4 Message-ID: <20140312162010.2B5CB1C301F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69902:32b8f8917322 Date: 2014-03-12 17:19 +0100 http://bitbucket.org/pypy/pypy/changeset/32b8f8917322/ Log: import stmgc/c6ed145863b4 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -3f0d8773b90b +c6ed145863b4 diff --git a/rpython/translator/stm/src_stm/stm/prebuilt.c b/rpython/translator/stm/src_stm/stm/prebuilt.c --- a/rpython/translator/stm/src_stm/stm/prebuilt.c +++ b/rpython/translator/stm/src_stm/stm/prebuilt.c @@ -17,9 +17,13 @@ return; /* If the object was already moved, it is stored in 'tree_prebuilt_objs'. + For now we use this dictionary, with keys being equal to the double + of the numeric address of the prebuilt object. We double them in + order to support addresses that are only 4-byte-aligned in the + static data. 
*/ wlog_t *item; - TREE_FIND(*tree_prebuilt_objs, (uintptr_t)obj, item, goto not_found); + TREE_FIND(*tree_prebuilt_objs, 2 * (uintptr_t)obj, item, goto not_found); *pstaticobj_invalid = (object_t *)item->val; /* already moved */ return; @@ -39,7 +43,7 @@ nobj->stm_flags = GCFLAG_WRITE_BARRIER; /* Add the object to the tree */ - tree_insert(tree_prebuilt_objs, (uintptr_t)obj, (uintptr_t)nobj); + tree_insert(tree_prebuilt_objs, 2 * (uintptr_t)obj, (uintptr_t)nobj); /* Done */ *pstaticobj_invalid = nobj; From noreply at buildbot.pypy.org Wed Mar 12 17:28:10 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 17:28:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: fix cleanup on abort Message-ID: <20140312162810.637241C301F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r984:2bb70b712097 Date: 2014-03-12 16:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/2bb70b712097/ Log: fix cleanup on abort diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -484,6 +484,7 @@ /* reset these lists to NULL too on abort */ LIST_FREE(pseg->objects_pointing_to_nursery); LIST_FREE(pseg->large_overflow_objects); + list_clear(pseg->young_weakrefs); } static void abort_with_mutex(void) diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -81,11 +81,49 @@ assert stm_get_weakref(lp1) == lp0 + def test_abort_cleanup(self): + self.start_transaction() + self.push_root_no_gc() + lp1 = stm_allocate_weakref(ffi.NULL) # no collection here + self.pop_root() + self.abort_transaction() + self.start_transaction() +class TestMajorCollection(BaseTest): + def test_simple(self): + self.start_transaction() + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # lp2 survived because no major collection + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + stm_major_collect() + lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL + From noreply at buildbot.pypy.org Wed Mar 12 17:28:11 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 17:28:11 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: another test Message-ID: <20140312162811.8AA841C301F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r985:2a1c60c59b19 Date: 2014-03-12 17:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/2a1c60c59b19/ Log: another test diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -91,6 +91,27 @@ self.abort_transaction() self.start_transaction() + def test_big_alloc_sizes(self): + sizes = [lib._STM_FAST_ALLOC + 16, 48,] + + for osize in sizes: + self.start_transaction() + self.push_root_no_gc() + lp2 = stm_allocate(osize) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 + self.abort_transaction() + class TestMajorCollection(BaseTest): From noreply at 
buildbot.pypy.org Wed Mar 12 17:28:12 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 17:28:12 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: consider major collections Message-ID: <20140312162812.A9D251C301F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r986:510710368671 Date: 2014-03-12 17:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/510710368671/ Log: consider major collections diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -111,11 +111,12 @@ struct tree_s *nursery_objects_shadows; /* List of all young weakrefs to check in minor collections. These - are the only weakrefs that may point to young objects. */ + are the only weakrefs that may point to young objects and never + contain NULL. */ struct list_s *young_weakrefs; /* List of all old weakrefs to check in major collections. These - weakrefs never point to young objects */ + weakrefs never point to young objects and never contain NULL. */ struct list_s *old_weakrefs; /* Tree of 'key->callback' associations from stm_call_on_abort() */ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -450,7 +450,11 @@ /* 'objects_pointing_to_nursery' should be empty, but isn't necessarily because it also lists objects that have been written to but don't actually point to the nursery. Clear - it up and set GCFLAG_WRITE_BARRIER again on the objects. */ + it up and set GCFLAG_WRITE_BARRIER again on the objects. + This is the case for transactions where + MINOR_NOTHING_TO_DO() == false + but they still did write-barriers on objects + */ lst = pseg->objects_pointing_to_nursery; if (lst != NULL) { LIST_FOREACH_R(lst, uintptr_t /*item*/, @@ -537,6 +541,9 @@ mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); + /* weakrefs: */ + stm_visit_old_weakrefs(); + /* cleanup */ clean_up_segment_lists(); diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -86,141 +86,38 @@ /***** Major collection *****/ -/* static _Bool is_partially_visited(gcptr obj) */ -/* { */ -/* /\* Based on gcpage.c:visit_public(). Check the code here if we change */ -/* visit_public(). Returns True or False depending on whether we find any */ -/* version of 'obj' to be MARKED or not. 
*/ -/* *\/ */ -/* assert(IMPLIES(obj->h_tid & GCFLAG_VISITED, */ -/* obj->h_tid & GCFLAG_MARKED)); */ -/* if (obj->h_tid & GCFLAG_MARKED) */ -/* return 1; */ -/* /\* if (!(obj->h_tid & GCFLAG_PUBLIC)) *\/ */ -/* /\* return 0; *\/ */ -/* assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); */ -/* if (obj->h_original != 0) { */ -/* gcptr original = (gcptr)obj->h_original; */ -/* assert(IMPLIES(original->h_tid & GCFLAG_VISITED, */ -/* original->h_tid & GCFLAG_MARKED)); */ -/* if (original->h_tid & GCFLAG_MARKED) */ -/* return 1; */ -/* } */ -/* return 0; */ -/* } */ +void stm_visit_old_weakrefs(void) +{ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst; -/* static void update_old_weakrefs_list(struct tx_public_descriptor *gcp) */ -/* { */ -/* long i, size = gcp->old_weakrefs.size; */ -/* gcptr *items = gcp->old_weakrefs.items; */ + lst = pseg->old_weakrefs; + uintptr_t n = list_count(lst); + while (n > 0) { + object_t *weakref = (object_t *)list_item(lst, --n); + if (!mark_visited_test(weakref)) { + /* weakref dies */ + list_set_item(lst, n, list_pop_item(lst)); + continue; + } -/* for (i = 0; i < size; i++) { */ -/* gcptr weakref = items[i]; */ + char *realobj = REAL_ADDRESS(pseg->pub.segment_base, weakref); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + object_t *pointing_to = *WEAKREF_PTR(weakref, size); + assert(pointing_to != NULL); + if (!mark_visited_test(pointing_to)) { + *WEAKREF_PTR(weakref, size) = NULL; -/* /\* if a weakref moved, update its position in the list *\/ */ -/* if (weakref->h_tid & GCFLAG_MOVED) { */ -/* items[i] = (gcptr)weakref->h_original; */ -/* } */ -/* } */ -/* } */ - -/* static void visit_old_weakrefs(struct tx_public_descriptor *gcp) */ -/* { */ -/* /\* Note: it's possible that a weakref points to a public stub to a */ -/* protected object, and only the protected object was marked as */ -/* VISITED so far. In this case, this function needs to mark the */ -/* public stub as VISITED too. */ -/* *\/ */ -/* long i, size = gcp->old_weakrefs.size; */ -/* gcptr *items = gcp->old_weakrefs.items; */ - -/* for (i = 0; i < size; i++) { */ -/* gcptr weakref = items[i]; */ - -/* if (!(weakref->h_tid & GCFLAG_VISITED)) { */ -/* /\* the weakref itself dies *\/ */ -/* } */ -/* else { */ -/* /\* the weakref belongs to our thread, therefore we should */ -/* always see the most current revision here: *\/ */ -/* assert(weakref->h_revision & 1); */ - -/* size_t size = stmgc_size(weakref); */ -/* gcptr pointing_to = *WEAKREF_PTR(weakref, size); */ -/* assert(pointing_to != NULL); */ -/* if (is_partially_visited(pointing_to)) { */ -/* pointing_to = stmgcpage_visit(pointing_to); */ -/* dprintf(("mweakref ptr moved %p->%p\n", */ -/* *WEAKREF_PTR(weakref, size), */ -/* pointing_to)); */ - -/* assert(pointing_to->h_tid & GCFLAG_VISITED); */ -/* *WEAKREF_PTR(weakref, size) = pointing_to; */ -/* } */ -/* else { */ -/* /\* the weakref appears to be pointing to a dying object, */ -/* but we don't know for sure now. Clearing it is left */ -/* to clean_old_weakrefs(). 
*\/ */ -/* } */ -/* } */ -/* } */ -/* } */ - -/* static void clean_old_weakrefs(struct tx_public_descriptor *gcp) */ -/* { */ -/* long i, size = gcp->old_weakrefs.size; */ -/* gcptr *items = gcp->old_weakrefs.items; */ - -/* for (i = size - 1; i >= 0; i--) { */ -/* gcptr weakref = items[i]; */ -/* assert(weakref->h_revision & 1); */ -/* if (weakref->h_tid & GCFLAG_VISITED) { */ -/* size_t size = stmgc_size(weakref); */ -/* gcptr pointing_to = *WEAKREF_PTR(weakref, size); */ -/* if (pointing_to->h_tid & GCFLAG_VISITED) { */ -/* continue; /\* the target stays alive, the weakref remains *\/ */ -/* } */ -/* dprintf(("mweakref lost ptr %p\n", *WEAKREF_PTR(weakref, size))); */ -/* *WEAKREF_PTR(weakref, size) = NULL; /\* the target dies *\/ */ -/* } */ -/* /\* remove this weakref from the list *\/ */ -/* items[i] = items[--gcp->old_weakrefs.size]; */ -/* } */ -/* gcptrlist_compress(&gcp->old_weakrefs); */ -/* } */ - -/* static void for_each_public_descriptor( */ -/* void visit(struct tx_public_descriptor *)) { */ -/* struct tx_descriptor *d; */ -/* for (d = stm_tx_head; d; d = d->tx_next) */ -/* visit(d->public_descriptor); */ - -/* struct tx_public_descriptor *gcp; */ -/* revision_t index = -1; */ -/* while ((gcp = stm_get_free_public_descriptor(&index)) != NULL) */ -/* visit(gcp); */ -/* } */ - -/* void stm_update_old_weakrefs_lists(void) */ -/* { */ -/* /\* go over old weakrefs lists and update the list with possibly */ -/* new pointers because of copy_over_original *\/ */ -/* for_each_public_descriptor(update_old_weakrefs_list); */ -/* } */ - - -/* void stm_visit_old_weakrefs(void) */ -/* { */ -/* /\* Figure out which weakrefs survive, which possibly */ -/* adds more objects to 'objects_to_trace'. */ -/* *\/ */ -/* for_each_public_descriptor(visit_old_weakrefs); */ -/* } */ - -/* void stm_clean_old_weakrefs(void) */ -/* { */ -/* /\* Clean up the non-surviving weakrefs */ -/* *\/ */ -/* for_each_public_descriptor(clean_old_weakrefs); */ -/* } */ + /* we don't need it in this list anymore */ + list_set_item(lst, n, list_pop_item(lst)); + continue; + } + else { + /* it survives! */ + } + } + } +} diff --git a/c7/stm/weakref.h b/c7/stm/weakref.h --- a/c7/stm/weakref.h +++ b/c7/stm/weakref.h @@ -5,9 +5,7 @@ #define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) void stm_move_young_weakrefs(void); -/* void stm_update_old_weakrefs_lists(void); */ -/* void stm_visit_old_weakrefs(void); */ -/* void stm_clean_old_weakrefs(void); */ +void stm_visit_old_weakrefs(void); #endif diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -195,7 +195,7 @@ } /* Allocate a weakref object. Weakref objects have a - reference to the referenced object at the byte-offset + reference to an object at the byte-offset stmcb_size_rounded_up(obj) - sizeof(void*) This reference becomes NULL if the referenced object was freed. You must assign the reference before the next collection may happen. 
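A minimal pure-Python model of the stm_visit_old_weakrefs() loop introduced in the diff above may help when reading the C code. This is only an illustrative sketch, not stmgc code: 'visited' stands in for mark_visited_test(), and the single 'target' attribute stands in for the reference slot that the real objects keep at byte-offset stmcb_size_rounded_up(obj) - sizeof(void*); all names are invented.

class WeakrefObj(object):
    def __init__(self, target):
        self.target = target              # the one reference slot of the weakref

def visit_old_weakrefs(old_weakrefs, visited):
    """Return the entries of 'old_weakrefs' that survive a major collection."""
    kept = []
    for wr in old_weakrefs:
        if wr not in visited:
            continue                      # the weakref object itself died: drop it
        assert wr.target is not None      # cleared weakrefs never stay in the list
        if wr.target not in visited:
            wr.target = None              # the target died: clear the slot...
            continue                      # ...and never look at this weakref again
        kept.append(wr)                   # both survive: re-check at the next major gc
    return kept

The real loop edits pseg->old_weakrefs in place with list_set_item()/list_pop_item() instead of building a new list, but the keep/clear/drop decisions it makes are the same as above.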
From noreply at buildbot.pypy.org Wed Mar 12 17:28:13 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 17:28:13 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: more tests Message-ID: <20140312162813.C19C71C301F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r987:e08ba00edf36 Date: 2014-03-12 17:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/e08ba00edf36/ Log: more tests diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -145,107 +145,46 @@ # lp2 died assert stm_get_weakref(lp1) == ffi.NULL + def test_weakref_old_keep(self): + lp0 = stm_allocate_old(48) + self.start_transaction() + self.push_root_no_gc() + lp1 = stm_allocate_weakref(lp0) # no collection here + self.pop_root() + self.push_root(lp1) + stm_major_collect() + lp1 = self.pop_root() + assert stm_get_weakref(lp1) == lp0 -# class TestMajorCollection(BaseTest): + def test_survive(self): + self.start_transaction() -# def test_weakref_old(self): -# p2 = nalloc(HDR) -# p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) -# # -# lib.stm_push_root(p1) -# lib.stm_push_root(p2) -# major_collect() -# p2 = lib.stm_pop_root() -# p1 = lib.stm_pop_root() -# assert lib.rawgetptr(p1, 0) == p2 -# # -# lib.stm_push_root(p1) -# major_collect() -# p1 = lib.stm_pop_root() -# assert lib.rawgetptr(p1, 0) == ffi.NULL + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() -# def test_weakref_to_prebuilt(self): -# p2 = palloc(HDR) -# p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) -# # -# lib.stm_push_root(p1) -# major_collect() -# p1 = lib.stm_pop_root() -# assert lib.rawgetptr(p1, 0) == p2 + assert stm_get_weakref(lp1) == lp2 -# def test_weakref_update_version(self): -# p2 = oalloc(HDR + WORD); make_public(p2) -# p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) -# # -# lib.stm_push_root(p1) -# lib.stm_push_root(p2) -# major_collect() -# p2 = lib.stm_pop_root() -# p1 = lib.stm_pop_root() -# assert lib.rawgetptr(p1, 0) == p2 -# # -# lib.stm_commit_transaction() -# lib.stm_begin_inevitable_transaction() -# # -# lib.setlong(p2, 0, 912809218) # write barrier -# assert lib.rawgetlong(p2, 0) == 0 -# lib.stm_push_root(p1) -# lib.stm_push_root(p2) -# major_collect() -# p2 = lib.stm_pop_root() -# p1 = lib.stm_pop_root() -# assert lib.rawgetptr(p1, 0) == p2 -# assert lib.rawgetlong(p2, 0) == 0 -# # -# lib.stm_commit_transaction() -# lib.stm_begin_inevitable_transaction() -# # -# assert lib.rawgetptr(p1, 0) == p2 -# assert lib.rawgetlong(p2, 0) == 0 -# lib.stm_push_root(p1) -# lib.stm_push_root(p2) -# major_collect() -# p2b = lib.stm_pop_root() -# p1 = lib.stm_pop_root() -# assert lib.rawgetptr(p1, 0) == p2 -# assert p2b != p2 -# assert lib.getlong(p2b, 0) == 912809218 -# assert lib.getlong(p2, 0) == 912809218 + self.push_root(lp1) + self.push_root(lp2) + stm_major_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # lp2 survived because no major collection + assert stm_get_weakref(lp1) == lp2 -# def test_stealing(self): -# p = palloc_refs(1) -# u = palloc_refs(1) - -# def f1(r): -# q = nalloc(HDR+WORD) -# # lib.stm_push_root(q) -# w = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, q) -# # q = lib.stm_pop_root() -# setptr(p, 0, w) -# setptr(u, 0, q) -# minor_collect() -# 
lib.stm_commit_transaction() -# lib.stm_begin_inevitable_transaction() -# r.set(2) -# r.wait(3) -# print "happy" - -# def f2(r): -# r.wait(2) -# # steal p, should stub the weakref contained in it -# pr = lib.stm_read_barrier(p) -# w = rawgetptr(pr, 0) -# assert classify(w) == "stub" - -# # read weakref, should stub out weakptr -# wr = lib.stm_read_barrier(w) -# assert wr.h_tid & GCFLAG_WEAKREF -# assert classify(lib.rawgetptr(wr, 0)) == "stub" - -# r.set(3) - -# run_parallel(f1, f2) + self.push_root(lp1) + stm_major_collect() + lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL From noreply at buildbot.pypy.org Wed Mar 12 17:35:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Mar 2014 17:35:49 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: comment Message-ID: <20140312163549.685B51C23F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69903:6ad4610e8d57 Date: 2014-03-12 17:35 +0100 http://bitbucket.org/pypy/pypy/changeset/6ad4610e8d57/ Log: comment diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -857,6 +857,9 @@ print >> f, '#include "forwarddecl.h"' print >> f print >> f, 'object_t *rpy_prebuilt[] = {' + # XXX should ideally only list objects that are directly referenced + # from C code *or* that need a custom hash. This would reduce a lot + # the length of the lists. gclist = [(node.globalgcnum, node) for node in database.globalcontainers() if hasattr(node, 'globalgcnum') and node.globalgcnum >= 0] gclist.sort() From noreply at buildbot.pypy.org Wed Mar 12 17:55:10 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 17:55:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: fix some multi-threading issues Message-ID: <20140312165510.1B5991C0460@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r988:e21afc419f0a Date: 2014-03-12 17:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/e21afc419f0a/ Log: fix some multi-threading issues diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -54,11 +54,6 @@ after the object. */ GCFLAG_HAS_SHADOW = 0x04, - /* This flag is set on weakref objects. Weakref objects have a - reference to the referenced object at the byte-offset - stmcb_size_rounded_up(obj) - sizeof(void*) */ - GCFLAG_WEAKREF = 0x08, - /* All remaining bits of the 32-bit 'stm_flags' field are taken by the "overflow number". This is a number that identifies the "overflow objects" from the current transaction among all old @@ -66,7 +61,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. 
*/ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x10 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x8 /* must be last */ }; diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -7,7 +7,9 @@ { OPT_ASSERT(size_rounded_up > sizeof(struct object_s)); object_t *obj = stm_allocate(size_rounded_up); - obj->stm_flags |= GCFLAG_WEAKREF; + + assert(_is_in_nursery(obj)); /* see assert(0) which depends on it */ + LIST_APPEND(STM_PSEGMENT->young_weakrefs, obj); return obj; } @@ -38,13 +40,16 @@ item = pforwarded_array[1]; /* moved location */ } else { - /* young outside nursery object */ - if (tree_contains(STM_PSEGMENT->young_outside_nursery, - (uintptr_t)item)) { - /* still in the tree -> wasn't seen by the minor collection, - so it doesn't survive */ - continue; - } + /* tell me if we need this (requires synchronizing in case + of private pages) */ + assert(0); + /* /\* young outside nursery object *\/ */ + /* if (tree_contains(STM_PSEGMENT->young_outside_nursery, */ + /* (uintptr_t)item)) { */ + /* /\* still in the tree -> wasn't seen by the minor collection, */ + /* so it doesn't survive *\/ */ + /* continue; */ + /* } */ } assert(!_is_young(item)); @@ -109,7 +114,11 @@ object_t *pointing_to = *WEAKREF_PTR(weakref, size); assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { + //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); *WEAKREF_PTR(weakref, size) = NULL; + if (flag_page_private[(uintptr_t)weakref / 4096UL] == PRIVATE_PAGE) { + synchronize_overflow_object_now(weakref); + } /* we don't need it in this list anymore */ list_set_item(lst, n, list_pop_item(lst)); diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -113,6 +113,37 @@ self.abort_transaction() + def test_multiple_threads(self): + self.start_transaction() + lp0 = stm_allocate(1024) + self.push_root(lp0) + self.commit_transaction() + + self.start_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + stm_write(lp0) # privatize page + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + self.push_root(lp0) + self.push_root(lp1) + self.commit_transaction() + # lp2 dies + lp1 = self.pop_root() + self.push_root(lp1) + + assert stm_get_weakref(lp1) == ffi.NULL + + self.switch(1) + + assert stm_get_weakref(lp1) == ffi.NULL + + + class TestMajorCollection(BaseTest): def test_simple(self): @@ -188,3 +219,35 @@ lp1 = self.pop_root() # lp2 died assert stm_get_weakref(lp1) == ffi.NULL + + def test_multiple_threads(self): + self.start_transaction() + lp0 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp0) # no collection here + self.push_root(lp1) + self.push_root(lp0) + self.commit_transaction() + + self.start_transaction() + lp0 = self.pop_root() + lp1 = self.pop_root() + self.push_root(lp1) + + stm_write(lp0) # privatize page with weakref in it too + + assert stm_get_page_flag(stm_get_obj_pages(lp1)[0]) == PRIVATE_PAGE + assert stm_get_weakref(lp1) == lp0 + + self.commit_transaction() + self.start_transaction() + + # lp0 dies + stm_major_collect() + + assert stm_get_weakref(lp1) == ffi.NULL + print stm_get_real_address(lp1) + + self.switch(1) + + assert stm_get_weakref(lp1) == ffi.NULL + print stm_get_real_address(lp1) From noreply at buildbot.pypy.org Wed Mar 12 17:57:29 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 17:57:29 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: fix the 
test, still passes Message-ID: <20140312165729.A18821C23F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r989:c1b635228c03 Date: 2014-03-12 17:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/c1b635228c03/ Log: fix the test, still passes diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -249,5 +249,6 @@ self.switch(1) + self.start_transaction() assert stm_get_weakref(lp1) == ffi.NULL print stm_get_real_address(lp1) From noreply at buildbot.pypy.org Wed Mar 12 18:04:57 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 12 Mar 2014 18:04:57 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: fix some other test and its failure by adding some synchronize_overflow_object_now(). Maybe there is a better solution Message-ID: <20140312170457.7E01D1C23F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r990:096c66121a67 Date: 2014-03-12 18:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/096c66121a67/ Log: fix some other test and its failure by adding some synchronize_overflow_object_now(). Maybe there is a better solution diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -65,11 +65,13 @@ || (pforwarded_array[0] != GCWORD_MOVED)) { /* pointing_to dies */ *WEAKREF_PTR(item, size) = NULL; + synchronize_overflow_object_now(item); continue; /* no need to remember in old_weakrefs */ } else { /* moved location */ *WEAKREF_PTR(item, size) = pforwarded_array[1]; + synchronize_overflow_object_now(item); } } else { @@ -79,6 +81,7 @@ /* still in the tree -> wasn't seen by the minor collection, so it doesn't survive */ *WEAKREF_PTR(item, size) = NULL; + synchronize_overflow_object_now(item); continue; /* no need to remember in old_weakrefs */ } /* pointing_to was already old */ diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -140,6 +140,7 @@ self.switch(1) + self.start_transaction() assert stm_get_weakref(lp1) == ffi.NULL From noreply at buildbot.pypy.org Wed Mar 12 19:37:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 12 Mar 2014 19:37:24 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140312183724.3D2B41C23F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69904:9c00b3c1768a Date: 2014-03-12 14:29 -0400 http://bitbucket.org/pypy/pypy/changeset/9c00b3c1768a/ Log: cleanups diff --git a/pypy/module/pypyjit/test/test_ztranslation.py b/pypy/module/pypyjit/test/test_ztranslation.py --- a/pypy/module/pypyjit/test/test_ztranslation.py +++ b/pypy/module/pypyjit/test/test_ztranslation.py @@ -1,5 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule -from pypy.objspace.fake.checkmodule import checkmodule def test_pypyjit_translates(): checkmodule('pypyjit') diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,13 +7,13 @@ from rpython.tool.udir import udir from rpython.tool import logparser from rpython.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, - find_ids, - OpMatcher, InvalidMatch) +from pypy.module.pypyjit.test_pypy_c.model import \ + Log, find_ids_range, find_ids, OpMatcher, InvalidMatch + class BaseTestPyPyC(object): log_string 
= 'jit-log-opt,jit-log-noopt,jit-log-virtualstate,jit-summary' - + def setup_class(cls): if '__pypy__' not in sys.builtin_module_names: py.test.skip("must run this test with pypy") @@ -98,7 +98,6 @@ class TestLog(object): - def test_find_ids_range(self): def f(): a = 0 # ID: myline @@ -127,7 +126,6 @@ class TestOpMatcher_(object): - def match(self, src1, src2, **kwds): from rpython.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) @@ -347,7 +345,6 @@ class TestRunPyPyC(BaseTestPyPyC): - def test_run_function(self): def f(a, b): return a+b @@ -385,7 +382,7 @@ assert len(loops) == 1 assert loops[0].filename == self.filepath assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 - assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 @@ -454,7 +451,6 @@ # ops = loop.ops_by_id('foo', opcode='INPLACE_SUBTRACT') assert log.opnames(ops) == ['int_sub_ovf', 'guard_no_overflow'] - def test_inlined_function(self): def f(): diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -132,8 +132,8 @@ The actual return value may be determined with os.WEXITSTATUS. """ + res = 0 ll_f = self.ll_file - res = 0 if ll_f: # double close is allowed self.ll_file = lltype.nullptr(FILE) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,9 +1,9 @@ - import os, sys, py from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile + class TestFile(BaseRtypingTest): def setup_class(cls): cls.tmpdir = udir.join('test_rfile') @@ -208,6 +208,7 @@ assert s == "%s\n" % printval assert os.WEXITSTATUS(r) == retval + class TestPopenR(BaseRtypingTest): def setup_class(cls): if sys.platform == 'win32': From noreply at buildbot.pypy.org Wed Mar 12 19:37:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 12 Mar 2014 19:37:25 +0100 (CET) Subject: [pypy-commit] pypy default: add hex/oct ops for ndarrays Message-ID: <20140312183725.865331C23F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69905:713f1b94343d Date: 2014-03-12 14:32 -0400 http://bitbucket.org/pypy/pypy/changeset/713f1b94343d/ Log: add hex/oct ops for ndarrays diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1045,6 +1045,26 @@ value = self.get_scalar_value() return space.float(value) + def descr_hex(self, space): + if self.get_size() != 1: + raise oefmt(space.w_TypeError, + "only length-1 arrays can be converted to Python scalars") + if not self.get_dtype().is_int(): + raise oefmt(space.w_TypeError, + "don't know how to convert scalar number to hex") + value = self.get_scalar_value() + return space.hex(value) + + def descr_oct(self, space): + if self.get_size() != 1: + raise oefmt(space.w_TypeError, + "only length-1 arrays can be converted to Python scalars") + if not self.get_dtype().is_int(): + raise oefmt(space.w_TypeError, + "don't know how to convert scalar number to oct") + value = self.get_scalar_value() + return space.oct(value) + def descr_index(self, space): if self.get_size() != 1 or \ not self.get_dtype().is_int() or 
self.get_dtype().is_bool(): @@ -1237,6 +1257,8 @@ __int__ = interp2app(W_NDimArray.descr_int), __long__ = interp2app(W_NDimArray.descr_long), __float__ = interp2app(W_NDimArray.descr_float), + __hex__ = interp2app(W_NDimArray.descr_hex), + __oct__ = interp2app(W_NDimArray.descr_oct), __buffer__ = interp2app(W_NDimArray.descr_get_data), __index__ = interp2app(W_NDimArray.descr_index), diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2276,6 +2276,30 @@ exc = raises(TypeError, "float(np.array([1.5, 2.5]))") assert exc.value[0] == 'only length-1 arrays can be converted to Python scalars' + def test__hex__(self): + import numpy as np + assert hex(np.array(True)) == '0x1' + assert hex(np.array(15)) == '0xf' + assert hex(np.array([15])) == '0xf' + exc = raises(TypeError, "hex(np.array(1.5))") + assert str(exc.value) == "don't know how to convert scalar number to hex" + exc = raises(TypeError, "hex(np.array('15'))") + assert str(exc.value) == "don't know how to convert scalar number to hex" + exc = raises(TypeError, "hex(np.array([1, 2]))") + assert str(exc.value) == "only length-1 arrays can be converted to Python scalars" + + def test__oct__(self): + import numpy as np + assert oct(np.array(True)) == '01' + assert oct(np.array(15)) == '017' + assert oct(np.array([15])) == '017' + exc = raises(TypeError, "oct(np.array(1.5))") + assert str(exc.value) == "don't know how to convert scalar number to oct" + exc = raises(TypeError, "oct(np.array('15'))") + assert str(exc.value) == "don't know how to convert scalar number to oct" + exc = raises(TypeError, "oct(np.array([1, 2]))") + assert str(exc.value) == "only length-1 arrays can be converted to Python scalars" + def test__reduce__(self): from numpypy import array, dtype from cPickle import loads, dumps From noreply at buildbot.pypy.org Wed Mar 12 19:37:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 12 Mar 2014 19:37:26 +0100 (CET) Subject: [pypy-commit] pypy default: skip this test for now Message-ID: <20140312183726.ABEB91C23F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69906:d6c57e3f3a1c Date: 2014-03-12 14:35 -0400 http://bitbucket.org/pypy/pypy/changeset/d6c57e3f3a1c/ Log: skip this test for now diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -43,6 +43,7 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) + skip('used to pass on 69421-f3e717c94913') assert loop.match(""" i81 = int_lt(i76, 300) guard_true(i81, descr=...) 
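The __hex__/__oct__ support added in r69905 above boils down to a simple rule, restated here as plain Python purely for illustration. This is not the actual descr_hex/descr_oct implementation: arr.dtype.kind is only used as a stand-in for the interp-level is_int() check, and array_hex is an invented helper name.

import numpy as np   # any array type exposing .size and .dtype works here

def array_hex(arr):
    # hex() of an ndarray: only a single element, and only integer-like dtypes
    if arr.size != 1:
        raise TypeError("only length-1 arrays can be converted to Python scalars")
    if arr.dtype.kind not in 'bui':   # bool, unsigned int, signed int
        raise TypeError("don't know how to convert scalar number to hex")
    return hex(int(arr))

assert array_hex(np.array(15)) == '0xf'

So hex(np.array(15)) and hex(np.array([15])) both give '0xf', while float or string arrays raise TypeError, which is what the new tests in test_ndarray.py check; __oct__ follows the same rule with oct().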
From noreply at buildbot.pypy.org Wed Mar 12 19:50:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 12 Mar 2014 19:50:30 +0100 (CET) Subject: [pypy-commit] pypy default: export some numpy constants Message-ID: <20140312185030.F27051C029E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69907:f70fd9b07a96 Date: 2014-03-12 14:49 -0400 http://bitbucket.org/pypy/pypy/changeset/f70fd9b07a96/ Log: export some numpy constants diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -6,23 +6,26 @@ interpleveldefs = { 'ndarray': 'ndarray.W_NDimArray', 'dtype': 'descriptor.W_Dtype', + 'flatiter': 'flatiter.W_FlatIterator', + '_reconstruct' : 'ndarray._reconstruct', + 'scalar' : 'ctors.build_scalar', 'array': 'ctors.array', 'zeros': 'ctors.zeros', 'empty': 'ctors.zeros', 'empty_like': 'ctors.empty_like', - '_reconstruct' : 'ndarray._reconstruct', - 'scalar' : 'ctors.build_scalar', + 'fromstring': 'ctors.fromstring', + + 'concatenate': 'arrayops.concatenate', + 'count_nonzero': 'arrayops.count_nonzero', 'dot': 'arrayops.dot', - 'fromstring': 'ctors.fromstring', - 'flatiter': 'flatiter.W_FlatIterator', - 'concatenate': 'arrayops.concatenate', 'where': 'arrayops.where', - 'count_nonzero': 'arrayops.count_nonzero', 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } + for c in ['CLIP', 'WRAP', 'RAISE']: + interpleveldefs[c] = 'space.wrap(constants.%s)' % c class UMathModule(MixedModule): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -246,6 +246,12 @@ return CustomIntObject(value) + def test_constants(self): + import numpy as np + assert np.CLIP is 0 + assert np.WRAP is 1 + assert np.RAISE is 2 + def test_ndarray(self): from numpy import ndarray, array, dtype, flatiter From noreply at buildbot.pypy.org Wed Mar 12 21:05:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 21:05:34 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: adapt to py3 Message-ID: <20140312200534.640D71C23F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69908:394bfcf9e463 Date: 2014-03-12 13:04 -0700 http://bitbucket.org/pypy/pypy/changeset/394bfcf9e463/ Log: adapt to py3 diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -258,7 +258,7 @@ return FileAsSocket(s1), FileAsSocket(s2) def test_poll_threaded(self): - import os, select, thread, time + import os, select, _thread as thread, time if not hasattr(select, 'poll'): skip("no select.poll() on this platform") r, w = os.pipe() @@ -271,7 +271,7 @@ t = thread.start_new_thread(pollster.poll, ()) try: time.sleep(0.1) - for i in range(5): print '', # to release GIL untranslated + for i in range(5): print(''), # to release GIL untranslated # trigger ufds array reallocation for fd in rfds: pollster.unregister(fd) @@ -282,7 +282,7 @@ # and make the call to poll() from the thread return os.write(w, b'spam') time.sleep(0.1) - for i in range(5): print '', # to release GIL untranslated + for i in range(5): print(''), # to release GIL untranslated finally: os.close(r) os.close(w) From noreply at 
buildbot.pypy.org Wed Mar 12 21:05:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 21:05:35 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: workaround this obscure py3k only failure Message-ID: <20140312200535.C0B931C23F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69909:154e2a421de8 Date: 2014-03-12 13:04 -0700 http://bitbucket.org/pypy/pypy/changeset/154e2a421de8/ Log: workaround this obscure py3k only failure diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -1,9 +1,12 @@ +from rpython.tool import udir from pypy.conftest import option class AppTestPyFrame: def setup_class(cls): + cls.w_udir = cls.space.wrap(str(udir.udir)) + cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1'))) if not option.runappdirect: w_call_further = cls.space.appexec([], """(): def call_further(f): @@ -61,9 +64,13 @@ f.f_lineno += 1 return x - def function(): + # obscure: call open beforehand, py3k's open invokes some app + # level code that confuses our tracing (likely due to the + # testing env, otherwise it's not a problem) + f = open(self.tempfile1, 'w') + def function(f=f): xyz - with open(self.tempfile1, 'w') as f: + with f as f: pass return 3 From noreply at buildbot.pypy.org Wed Mar 12 23:27:25 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 23:27:25 +0100 (CET) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140312222725.F19741C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69910:015cdb7a0f20 Date: 2014-03-12 15:20 -0700 http://bitbucket.org/pypy/pypy/changeset/015cdb7a0f20/ Log: py3k compat diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -330,12 +330,12 @@ raises(UnicodeDecodeError, decode, r"\U00110000") assert decode(r"\U00110000", "ignore") == (u"", 10) assert decode(r"\U00110000", "replace") == (u"\ufffd", 10) - exc = raises(UnicodeDecodeError, unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\U1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\U1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" def test_escape_encode(self): assert '"'.encode('string_escape') == '"' @@ -596,7 +596,7 @@ l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)] return (u"[%s]" % u"".join(l), exc.end) 
codecs.register_error("test.handler1", handler1) - assert "\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ + assert b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ u"\u3042[<92><117><51>]xxx" def test_encode_error_bad_handler(self): @@ -649,22 +649,22 @@ def test_utf7_errors(self): import codecs tests = [ - ('a\xffb', u'a\ufffdb'), - ('a+IK', u'a\ufffd'), - ('a+IK-b', u'a\ufffdb'), - ('a+IK,b', u'a\ufffdb'), - ('a+IKx', u'a\u20ac\ufffd'), - ('a+IKx-b', u'a\u20ac\ufffdb'), - ('a+IKwgr', u'a\u20ac\ufffd'), - ('a+IKwgr-b', u'a\u20ac\ufffdb'), - ('a+IKwgr,', u'a\u20ac\ufffd'), - ('a+IKwgr,-b', u'a\u20ac\ufffd-b'), - ('a+IKwgrB', u'a\u20ac\u20ac\ufffd'), - ('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), - ('a+/,+IKw-b', u'a\ufffd\u20acb'), - ('a+//,+IKw-b', u'a\ufffd\u20acb'), - ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), - ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a\xffb', u'a\ufffdb'), + (b'a+IK', u'a\ufffd'), + (b'a+IK-b', u'a\ufffdb'), + (b'a+IK,b', u'a\ufffdb'), + (b'a+IKx', u'a\u20ac\ufffd'), + (b'a+IKx-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr', u'a\u20ac\ufffd'), + (b'a+IKwgr-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr,', u'a\u20ac\ufffd'), + (b'a+IKwgr,-b', u'a\u20ac\ufffd-b'), + (b'a+IKwgrB', u'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), + (b'a+/,+IKw-b', u'a\ufffd\u20acb'), + (b'a+//,+IKw-b', u'a\ufffd\u20acb'), + (b'a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), ] for raw, expected in tests: raises(UnicodeDecodeError, codecs.utf_7_decode, raw, 'strict', True) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -14,12 +14,12 @@ assert 'foo' in sys.modules assert "copy" in dir(module.fooType) obj = module.new() - print obj.foo + print(obj.foo) assert obj.foo == 42 - print "Obj has type", type(obj) + print("Obj has type", type(obj)) assert type(obj) is module.fooType - print "type of obj has type", type(type(obj)) - print "type of type of obj has type", type(type(type(obj))) + print("type of obj has type", type(type(obj))) + print("type of type of obj has type", type(type(type(obj)))) assert module.fooType.__doc__ == "foo is for testing." def test_typeobject_method_descriptor(self): @@ -36,7 +36,7 @@ assert repr(module.fooType.__call__) == "" assert obj2(foo=1, bar=2) == dict(foo=1, bar=2) - print obj.foo + print(obj.foo) assert obj.foo == 42 assert obj.int_member == obj.foo @@ -592,5 +592,5 @@ def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') - print 'calling module.Type()...' 
+ print('calling module.Type()...') module.Type("X", (object,), {}) From noreply at buildbot.pypy.org Wed Mar 12 23:27:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 23:27:27 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: merge default Message-ID: <20140312222727.AEB521C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69911:0913e409b4e2 Date: 2014-03-12 15:20 -0700 http://bitbucket.org/pypy/pypy/changeset/0913e409b4e2/ Log: merge default diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -337,12 +337,12 @@ raises(UnicodeDecodeError, decode, r"\U00110000") assert decode(r"\U00110000", "ignore") == (u"", 10) assert decode(r"\U00110000", "replace") == (u"\ufffd", 10) - exc = raises(UnicodeDecodeError, unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\U1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\U1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" def test_escape_encode(self): import _codecs @@ -653,7 +653,7 @@ l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)] return (u"[%s]" % u"".join(l), exc.end) codecs.register_error("test.handler1", handler1) - assert "\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ + assert b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ u"\u3042[<92><117><51>]xxx" def test_encode_error_bad_handler(self): @@ -706,22 +706,22 @@ def test_utf7_errors(self): import codecs tests = [ - ('a\xffb', u'a\ufffdb'), - ('a+IK', u'a\ufffd'), - ('a+IK-b', u'a\ufffdb'), - ('a+IK,b', u'a\ufffdb'), - ('a+IKx', u'a\u20ac\ufffd'), - ('a+IKx-b', u'a\u20ac\ufffdb'), - ('a+IKwgr', u'a\u20ac\ufffd'), - ('a+IKwgr-b', u'a\u20ac\ufffdb'), - ('a+IKwgr,', u'a\u20ac\ufffd'), - ('a+IKwgr,-b', u'a\u20ac\ufffd-b'), - ('a+IKwgrB', u'a\u20ac\u20ac\ufffd'), - ('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), - ('a+/,+IKw-b', u'a\ufffd\u20acb'), - ('a+//,+IKw-b', u'a\ufffd\u20acb'), - ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), - ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a\xffb', u'a\ufffdb'), + (b'a+IK', u'a\ufffd'), + (b'a+IK-b', u'a\ufffdb'), + (b'a+IK,b', u'a\ufffdb'), + (b'a+IKx', u'a\u20ac\ufffd'), + (b'a+IKx-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr', u'a\u20ac\ufffd'), + (b'a+IKwgr-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr,', u'a\u20ac\ufffd'), + (b'a+IKwgr,-b', u'a\u20ac\ufffd-b'), + (b'a+IKwgrB', u'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), + 
(b'a+/,+IKw-b', u'a\ufffd\u20acb'), + (b'a+//,+IKw-b', u'a\ufffd\u20acb'), + (b'a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), ] for raw, expected in tests: raises(UnicodeDecodeError, codecs.utf_7_decode, raw, 'strict', True) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -584,5 +584,5 @@ def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') - print 'calling module.Type()...' + print('calling module.Type()...') module.Type("X", (object,), {}) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -6,23 +6,26 @@ interpleveldefs = { 'ndarray': 'ndarray.W_NDimArray', 'dtype': 'descriptor.W_Dtype', + 'flatiter': 'flatiter.W_FlatIterator', + '_reconstruct' : 'ndarray._reconstruct', + 'scalar' : 'ctors.build_scalar', 'array': 'ctors.array', 'zeros': 'ctors.zeros', 'empty': 'ctors.zeros', 'empty_like': 'ctors.empty_like', - '_reconstruct' : 'ndarray._reconstruct', - 'scalar' : 'ctors.build_scalar', + 'fromstring': 'ctors.fromstring', + + 'concatenate': 'arrayops.concatenate', + 'count_nonzero': 'arrayops.count_nonzero', 'dot': 'arrayops.dot', - 'fromstring': 'ctors.fromstring', - 'flatiter': 'flatiter.W_FlatIterator', - 'concatenate': 'arrayops.concatenate', 'where': 'arrayops.where', - 'count_nonzero': 'arrayops.count_nonzero', 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } + for c in ['CLIP', 'WRAP', 'RAISE']: + interpleveldefs[c] = 'space.wrap(constants.%s)' % c class UMathModule(MixedModule): diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1035,6 +1035,26 @@ value = self.get_scalar_value() return space.float(value) + def descr_hex(self, space): + if self.get_size() != 1: + raise oefmt(space.w_TypeError, + "only length-1 arrays can be converted to Python scalars") + if not self.get_dtype().is_int(): + raise oefmt(space.w_TypeError, + "don't know how to convert scalar number to hex") + value = self.get_scalar_value() + return space.hex(value) + + def descr_oct(self, space): + if self.get_size() != 1: + raise oefmt(space.w_TypeError, + "only length-1 arrays can be converted to Python scalars") + if not self.get_dtype().is_int(): + raise oefmt(space.w_TypeError, + "don't know how to convert scalar number to oct") + value = self.get_scalar_value() + return space.oct(value) + def descr_index(self, space): if self.get_size() != 1 or \ not self.get_dtype().is_int() or self.get_dtype().is_bool(): @@ -1226,6 +1246,8 @@ __str__ = interp2app(W_NDimArray.descr_str), __int__ = interp2app(W_NDimArray.descr_int), __float__ = interp2app(W_NDimArray.descr_float), + __hex__ = interp2app(W_NDimArray.descr_hex), + __oct__ = interp2app(W_NDimArray.descr_oct), __buffer__ = interp2app(W_NDimArray.descr_get_data), __index__ = interp2app(W_NDimArray.descr_index), diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -246,6 +246,12 @@ return CustomIntObject(value) + def test_constants(self): + import numpy as np + assert np.CLIP is 0 
+ assert np.WRAP is 1 + assert np.RAISE is 2 + def test_ndarray(self): from numpy import ndarray, array, dtype, flatiter @@ -2267,6 +2273,30 @@ exc = raises(TypeError, "float(np.array([1.5, 2.5]))") assert exc.value[0] == 'only length-1 arrays can be converted to Python scalars' + def test__hex__(self): + import numpy as np + assert hex(np.array(True)) == '0x1' + assert hex(np.array(15)) == '0xf' + assert hex(np.array([15])) == '0xf' + exc = raises(TypeError, "hex(np.array(1.5))") + assert str(exc.value) == "don't know how to convert scalar number to hex" + exc = raises(TypeError, "hex(np.array('15'))") + assert str(exc.value) == "don't know how to convert scalar number to hex" + exc = raises(TypeError, "hex(np.array([1, 2]))") + assert str(exc.value) == "only length-1 arrays can be converted to Python scalars" + + def test__oct__(self): + import numpy as np + assert oct(np.array(True)) == '01' + assert oct(np.array(15)) == '017' + assert oct(np.array([15])) == '017' + exc = raises(TypeError, "oct(np.array(1.5))") + assert str(exc.value) == "don't know how to convert scalar number to oct" + exc = raises(TypeError, "oct(np.array('15'))") + assert str(exc.value) == "don't know how to convert scalar number to oct" + exc = raises(TypeError, "oct(np.array([1, 2]))") + assert str(exc.value) == "only length-1 arrays can be converted to Python scalars" + def test__reduce__(self): from numpypy import array, dtype from cPickle import loads, dumps diff --git a/pypy/module/pypyjit/test/test_ztranslation.py b/pypy/module/pypyjit/test/test_ztranslation.py --- a/pypy/module/pypyjit/test/test_ztranslation.py +++ b/pypy/module/pypyjit/test/test_ztranslation.py @@ -1,5 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule -from pypy.objspace.fake.checkmodule import checkmodule def test_pypyjit_translates(): checkmodule('pypyjit') diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,13 +7,13 @@ from rpython.tool.udir import udir from rpython.tool import logparser from rpython.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, - find_ids, - OpMatcher, InvalidMatch) +from pypy.module.pypyjit.test_pypy_c.model import \ + Log, find_ids_range, find_ids, OpMatcher, InvalidMatch + class BaseTestPyPyC(object): log_string = 'jit-log-opt,jit-log-noopt,jit-log-virtualstate,jit-summary' - + def setup_class(cls): if '__pypy__' not in sys.builtin_module_names: py.test.skip("must run this test with pypy") @@ -98,7 +98,6 @@ class TestLog(object): - def test_find_ids_range(self): def f(): a = 0 # ID: myline @@ -127,7 +126,6 @@ class TestOpMatcher_(object): - def match(self, src1, src2, **kwds): from rpython.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) @@ -347,7 +345,6 @@ class TestRunPyPyC(BaseTestPyPyC): - def test_run_function(self): def f(a, b): return a+b @@ -385,7 +382,7 @@ assert len(loops) == 1 assert loops[0].filename == self.filepath assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 - assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 @@ -454,7 +451,6 @@ # ops = loop.ops_by_id('foo', 
opcode='INPLACE_SUBTRACT') assert log.opnames(ops) == ['int_sub_ovf', 'guard_no_overflow'] - def test_inlined_function(self): def f(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -0,0 +1,60 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestMicroNumPy(BaseTestPyPyC): + def test_array_getitem_basic(self): + def main(): + import _numpypy.multiarray as np + arr = np.zeros((300, 300)) + x = 150 + y = 0 + while y < 300: + a = arr[x, y] + y += 1 + return a + log = self.run(main, []) + assert log.result == 0 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i76 = int_lt(i71, 300) + guard_true(i76, descr=...) + i77 = int_ge(i71, i59) + guard_false(i77, descr=...) + i78 = int_mul(i71, i61) + i79 = int_add(i55, i78) + f80 = raw_load(i67, i79, descr=) + i81 = int_add(i71, 1) + guard_not_invalidated(descr=...) + --TICK-- + jump(p0, p1, p3, p6, p7, p12, p14, p16, i81, f80, i59, p38, i55, p40, i37, i61, i67, descr=...) + """) + + def test_array_getitem_accumulate(self): + def main(): + import _numpypy.multiarray as np + arr = np.zeros((300, 300)) + a = 0.0 + x = 150 + y = 0 + while y < 300: + a += arr[x, y] + y += 1 + return a + log = self.run(main, []) + assert log.result == 0 + loop, = log.loops_by_filename(self.filepath) + skip('used to pass on 69421-f3e717c94913') + assert loop.match(""" + i81 = int_lt(i76, 300) + guard_true(i81, descr=...) + i82 = int_ge(i76, i62) + guard_false(i82, descr=...) + i83 = int_mul(i76, i64) + i84 = int_add(i58, i83) + f85 = raw_load(i70, i84, descr=) + guard_not_invalidated(descr=...) + f86 = float_add(f74, f85) + i87 = int_add(i76, 1) + --TICK-- + jump(p0, p1, p3, p6, p7, p12, p14, f86, p18, i87, i62, p41, i58, p47, i40, i64, i70, descr=...) + """) diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -132,8 +132,8 @@ The actual return value may be determined with os.WEXITSTATUS. 
""" + res = 0 ll_f = self.ll_file - res = 0 if ll_f: # double close is allowed self.ll_file = lltype.nullptr(FILE) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,9 +1,9 @@ - import os, sys, py from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile + class TestFile(BaseRtypingTest): def setup_class(cls): cls.tmpdir = udir.join('test_rfile') @@ -208,6 +208,7 @@ assert s == "%s\n" % printval assert os.WEXITSTATUS(r) == retval + class TestPopenR(BaseRtypingTest): def setup_class(cls): if sys.platform == 'win32': diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -6,7 +6,7 @@ from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, SomeUnicodeCodePoint, SomeInteger, SomeString, SomeImpossibleValue, - s_None, s_Bool, UnionError, AnnotatorError) + s_None, s_Bool, UnionError, AnnotatorError, SomeBool) from rpython.rtyper.lltypesystem import lltype, llmemory class SomeAddress(SomeObject): @@ -155,7 +155,10 @@ return ll_to_annotation(v) def bool(self): - return s_Bool + result = SomeBool() + if self.is_constant(): + result.const = bool(self.const) + return result class SomeInteriorPtr(SomePtr): diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, rstr from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr -from rpython.rtyper.lltypesystem.llmemory import itemoffsetof, raw_memcopy +from rpython.rtyper.lltypesystem.llmemory import itemoffsetof from rpython.rtyper.llannotation import lltype_to_annotation from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.objectmodel import Symbolic @@ -679,7 +679,9 @@ if strtype is str: from rpython.rtyper.lltypesystem.rstr import (STR as STRTYPE, - copy_string_to_raw) + copy_string_to_raw, + copy_raw_to_string, + copy_string_contents) from rpython.rtyper.annlowlevel import llstr as llstrtype from rpython.rtyper.annlowlevel import hlstr as hlstrtype TYPEP = CCHARP @@ -689,7 +691,9 @@ else: from rpython.rtyper.lltypesystem.rstr import ( UNICODE as STRTYPE, - copy_unicode_to_raw as copy_string_to_raw) + copy_unicode_to_raw as copy_string_to_raw, + copy_raw_to_unicode as copy_raw_to_string, + copy_unicode_contents as copy_string_contents) from rpython.rtyper.annlowlevel import llunicode as llstrtype from rpython.rtyper.annlowlevel import hlunicode as hlstrtype TYPEP = CWCHARP @@ -803,17 +807,10 @@ return hlstrtype(gc_buf) new_buf = lltype.malloc(STRTYPE, needed_size) - str_chars_offset = (offsetof(STRTYPE, 'chars') + \ - itemoffsetof(STRTYPE.chars, 0)) if gc_buf: - src = cast_ptr_to_adr(gc_buf) + str_chars_offset + copy_string_contents(gc_buf, new_buf, 0, 0, needed_size) else: - src = cast_ptr_to_adr(raw_buf) + itemoffsetof(TYPEP.TO, 0) - dest = cast_ptr_to_adr(new_buf) + str_chars_offset - raw_memcopy(src, dest, - llmemory.sizeof(ll_char_type) * needed_size) - keepalive_until_here(gc_buf) - keepalive_until_here(new_buf) + copy_raw_to_string(raw_buf, new_buf, 0, needed_size) return hlstrtype(new_buf) # (char*, str) -> None diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- 
a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -290,14 +290,15 @@ if not s_tup.is_constant(): raise TyperError("contains() on non-const tuple") t = s_tup.const - if len(t) == 0: - hop.exception_cannot_occur() - return hop.inputconst(Bool, False) + s_item = hop.args_s[1] r_item = hop.args_r[1] v_arg = hop.inputarg(r_item, arg=1) ll_eq = r_item.get_ll_eq_function() or _ll_equal v_result = None for x in t: + s_const_item = hop.rtyper.annotator.bookkeeper.immutablevalue(x) + if not s_item.contains(s_const_item): + continue # corner case, see test_constant_tuple_contains_bug c_tuple_item = hop.inputconst(r_item, x) v_equal = hop.gendirectcall(ll_eq, v_arg, c_tuple_item) if v_result is None: diff --git a/rpython/rtyper/test/test_rtuple.py b/rpython/rtyper/test/test_rtuple.py --- a/rpython/rtyper/test/test_rtuple.py +++ b/rpython/rtyper/test/test_rtuple.py @@ -95,6 +95,14 @@ res = self.interpret(f, [50]) assert res is False + def test_constant_tuple_contains_bug(self): + def f(i): + return chr(i) in ('1', '2', '34') # the '34' can never match + res = self.interpret(f, [ord('1')]) + assert res is True + res = self.interpret(f, [ord('3')]) + assert res is False + def test_conv(self): def t0(): return (3, 2, None) From noreply at buildbot.pypy.org Wed Mar 12 23:27:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 23:27:29 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: 2to3 Message-ID: <20140312222729.269291C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69912:fa6ec0ca81f5 Date: 2014-03-12 15:25 -0700 http://bitbucket.org/pypy/pypy/changeset/fa6ec0ca81f5/ Log: 2to3 diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -650,7 +650,7 @@ if not isinstance(exc, UnicodeEncodeError) \ and not isinstance(exc, UnicodeDecodeError): raise TypeError("don't know how to handle %r" % exc) - l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)] + l = [u"<%d>" % exc.object[pos] for pos in range(exc.start, exc.end)] return (u"[%s]" % u"".join(l), exc.end) codecs.register_error("test.handler1", handler1) assert b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ From noreply at buildbot.pypy.org Wed Mar 12 23:27:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 23:27:30 +0100 (CET) Subject: [pypy-commit] pypy py3k-stdlib-2.7.6-merge: close before merging Message-ID: <20140312222730.4D1361C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-stdlib-2.7.6-merge Changeset: r69913:ac680cc4afc4 Date: 2014-03-12 15:25 -0700 http://bitbucket.org/pypy/pypy/changeset/ac680cc4afc4/ Log: close before merging From noreply at buildbot.pypy.org Wed Mar 12 23:27:38 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Mar 2014 23:27:38 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge py3k-stdlib-2.7.6-merge Message-ID: <20140312222738.683EE1C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69914:8a77a2e7edcd Date: 2014-03-12 15:26 -0700 http://bitbucket.org/pypy/pypy/changeset/8a77a2e7edcd/ Log: merge py3k-stdlib-2.7.6-merge diff too long, truncating to 2000 out of 45320 lines diff --git a/lib-python/2.7/BaseHTTPServer.py b/lib-python/2.7/BaseHTTPServer.py --- a/lib-python/2.7/BaseHTTPServer.py +++ b/lib-python/2.7/BaseHTTPServer.py @@ -447,13 +447,13 @@ specified as 
subsequent arguments (it's just like printf!). - The client host and current date/time are prefixed to - every message. + The client ip address and current date/time are prefixed to every + message. """ sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), + (self.client_address[0], self.log_date_time_string(), format%args)) diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,9 +84,11 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - splitpath = _url_collapse_path_split(self.path) - if splitpath[0] in self.cgi_directories: - self.cgi_info = splitpath + collapsed_path = _url_collapse_path(self.path) + dir_sep = collapsed_path.find('/', 1) + head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] + if head in self.cgi_directories: + self.cgi_info = head, tail return True return False @@ -103,18 +105,17 @@ def run_cgi(self): """Execute a CGI script.""" - path = self.path dir, rest = self.cgi_info - i = path.find('/', len(dir) + 1) + i = rest.find('/') while i >= 0: - nextdir = path[:i] - nextrest = path[i+1:] + nextdir = rest[:i] + nextrest = rest[i+1:] scriptdir = self.translate_path(nextdir) if os.path.isdir(scriptdir): dir, rest = nextdir, nextrest - i = path.find('/', len(dir) + 1) + i = rest.find('/') else: break @@ -298,44 +299,46 @@ self.log_message("CGI script exited OK") -# TODO(gregory.p.smith): Move this into an appropriate library. -def _url_collapse_path_split(path): +def _url_collapse_path(path): """ Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references. + any '..' references and returns a colllapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. + The utility of this function is limited to is_cgi method and helps + preventing some security attacks. Returns: A tuple of (head, tail) where tail is everything after the final / and head is everything before it. Head will always start with a '/' and, if it contains anything else, never have a trailing '/'. Raises: IndexError if too many '..' occur within the path. + """ # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. - path_parts = [] - for part in path.split('/'): - if part == '.': - path_parts.append('') - else: - path_parts.append(part) - # Filter out blank non trailing parts before consuming the '..'. - path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:] + path_parts = path.split('/') + head_parts = [] + for part in path_parts[:-1]: + if part == '..': + head_parts.pop() # IndexError if more '..' 
than prior parts + elif part and part != '.': + head_parts.append( part ) if path_parts: tail_part = path_parts.pop() + if tail_part: + if tail_part == '..': + head_parts.pop() + tail_part = '' + elif tail_part == '.': + tail_part = '' else: tail_part = '' - head_parts = [] - for part in path_parts: - if part == '..': - head_parts.pop() - else: - head_parts.append(part) - if tail_part and tail_part == '..': - head_parts.pop() - tail_part = '' - return ('/' + '/'.join(head_parts), tail_part) + + splitpath = ('/' + '/'.join(head_parts), tail_part) + collapsed_path = "/".join(splitpath) + + return collapsed_path nobody = None diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -238,7 +238,7 @@ # a two-way quoting algorithm. Any non-text character is translated # into a 4 character sequence: a forward-slash followed by the # three-digit octal equivalent of the character. Any '\' or '"' is -# quoted with a preceeding '\' slash. +# quoted with a preceding '\' slash. # # These are taken from RFC2068 and RFC2109. # _LegalChars is the list of chars which don't require "'s @@ -390,7 +390,7 @@ from time import gmtime, time now = time() year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) - return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \ + return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ (weekdayname[wd], day, monthname[month], year, hh, mm, ss) @@ -539,7 +539,7 @@ r"(?P" # Start of group 'val' r'"(?:[^\\"]|\\.)*"' # Any doublequoted string r"|" # or - r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr + r"\w{3},\s[\s\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr r"|" # or ""+ _LegalCharsPatt +"*" # Any word or empty string r")" # End of group 'val' diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,13 +22,13 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*') +tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( - r'[\s/]*((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*' + r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" @@ -289,7 +289,7 @@ match = tagfind.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() - self.lasttag = tag = rawdata[i+1:k].lower() + self.lasttag = tag = match.group(1).lower() while k < endpos: m = attrfind.match(rawdata, k) diff --git a/lib-python/2.7/Queue.py b/lib-python/2.7/Queue.py --- a/lib-python/2.7/Queue.py +++ b/lib-python/2.7/Queue.py @@ -109,7 +109,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until a free slot is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Full exception if no free slot was available within that time. 
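For a concrete feel for what the rewritten _url_collapse_path above does: '.' and '..' segments are resolved before the cgi_directories comparison is made. A stripped-down sketch of the same idea (illustrative only; the real helper additionally splits off the trailing path component and keeps query handling elsewhere):

    def collapse(path):
        # Resolve '.' and '..' before any directory comparison.
        resolved = []
        for part in path.split('/'):
            if part in ('', '.'):
                continue
            if part == '..':
                resolved.pop()      # IndexError if the path escapes the root
            else:
                resolved.append(part)
        return '/' + '/'.join(resolved)

    assert collapse('/cgi-bin/../cgi-bin/./x.py') == '/cgi-bin/x.py'
    assert collapse('//cgi-bin//x.py') == '/cgi-bin/x.py'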
Otherwise ('block' is false), put an item on the queue if a free slot is immediately available, else raise the Full exception ('timeout' @@ -125,7 +125,7 @@ while self._qsize() == self.maxsize: self.not_full.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while self._qsize() == self.maxsize: @@ -152,7 +152,7 @@ If optional args 'block' is true and 'timeout' is None (the default), block if necessary until an item is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises + a non-negative number, it blocks at most 'timeout' seconds and raises the Empty exception if no item was available within that time. Otherwise ('block' is false), return an item if one is immediately available, else raise the Empty exception ('timeout' is ignored @@ -167,7 +167,7 @@ while not self._qsize(): self.not_empty.wait() elif timeout < 0: - raise ValueError("'timeout' must be a positive number") + raise ValueError("'timeout' must be a non-negative number") else: endtime = _time() + timeout while not self._qsize(): diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -149,6 +149,8 @@ # abandon query parameters path = path.split('?',1)[0] path = path.split('#',1)[0] + # Don't forget explicit trailing slash when normalizing. Issue17324 + trailing_slash = path.rstrip().endswith('/') path = posixpath.normpath(urllib.unquote(path)) words = path.split('/') words = filter(None, words) @@ -158,6 +160,8 @@ head, word = os.path.split(word) if word in (os.curdir, os.pardir): continue path = os.path.join(path, word) + if trailing_slash: + path += '/' return path def copyfile(self, source, outputfile): diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -1,4 +1,4 @@ -"""Simple XML-RPC Server. +r"""Simple XML-RPC Server. This module can be used to create simple XML-RPC servers by creating a server and either installing functions, a diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -133,6 +133,7 @@ import select import sys import os +import errno try: import threading except ImportError: @@ -147,6 +148,15 @@ "ThreadingUnixStreamServer", "ThreadingUnixDatagramServer"]) +def _eintr_retry(func, *args): + """restart a system call interrupted by EINTR""" + while True: + try: + return func(*args) + except (OSError, select.error) as e: + if e.args[0] != errno.EINTR: + raise + class BaseServer: """Base class for server classes. @@ -222,7 +232,8 @@ # connecting to the socket to wake this up instead of # polling. Polling reduces our responsiveness to a # shutdown request and wastes cpu at all other times. 
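The _eintr_retry helper introduced in the SocketServer hunk above is a general-purpose pattern: a blocking call such as select() may fail with EINTR whenever a signal arrives, even though nothing is actually wrong, so the call is simply restarted. A standalone sketch of the same idea (function name invented for the example):

    import errno
    import select

    def retry_on_eintr(func, *args):
        # Restart the call while it keeps being interrupted by signals.
        while True:
            try:
                return func(*args)
            except (OSError, select.error) as e:
                if e.args[0] != errno.EINTR:
                    raise   # a real error, not just an interrupted call

    # e.g. wait up to 0.5s for `sock` to become readable, surviving signals:
    # r, w, x = retry_on_eintr(select.select, [sock], [], [], 0.5)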
- r, w, e = select.select([self], [], [], poll_interval) + r, w, e = _eintr_retry(select.select, [self], [], [], + poll_interval) if self in r: self._handle_request_noblock() finally: @@ -262,7 +273,7 @@ timeout = self.timeout elif self.timeout is not None: timeout = min(timeout, self.timeout) - fd_sets = select.select([self], [], [], timeout) + fd_sets = _eintr_retry(select.select, [self], [], [], timeout) if not fd_sets[0]: self.handle_timeout() return @@ -690,7 +701,12 @@ def finish(self): if not self.wfile.closed: - self.wfile.flush() + try: + self.wfile.flush() + except socket.error: + # An final socket error may have occurred here, such as + # the local error ECONNABORTED. + pass self.wfile.close() self.rfile.close() diff --git a/lib-python/2.7/StringIO.py b/lib-python/2.7/StringIO.py --- a/lib-python/2.7/StringIO.py +++ b/lib-python/2.7/StringIO.py @@ -158,7 +158,7 @@ newpos = self.len else: newpos = i+1 - if length is not None and length > 0: + if length is not None and length >= 0: if self.pos + length < newpos: newpos = self.pos + length r = self.buf[self.pos:newpos] diff --git a/lib-python/2.7/_LWPCookieJar.py b/lib-python/2.7/_LWPCookieJar.py --- a/lib-python/2.7/_LWPCookieJar.py +++ b/lib-python/2.7/_LWPCookieJar.py @@ -48,7 +48,7 @@ class LWPCookieJar(FileCookieJar): """ - The LWPCookieJar saves a sequence of"Set-Cookie3" lines. + The LWPCookieJar saves a sequence of "Set-Cookie3" lines. "Set-Cookie3" is the format used by the libwww-perl libary, not known to be compatible with any browser, but which is easy to read and doesn't lose information about RFC 2965 cookies. @@ -60,7 +60,7 @@ """ def as_lwp_str(self, ignore_discard=True, ignore_expires=True): - """Return cookies as a string of "\n"-separated "Set-Cookie3" headers. + """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers. ignore_discard and ignore_expires: see docstring for FileCookieJar.save diff --git a/lib-python/2.7/__future__.py b/lib-python/2.7/__future__.py --- a/lib-python/2.7/__future__.py +++ b/lib-python/2.7/__future__.py @@ -112,7 +112,7 @@ CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), - (2, 7, 0, "alpha", 0), + (3, 0, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -74,6 +74,7 @@ @abstractmethod def next(self): + 'Return the next item from the iterator. When exhausted, raise StopIteration' raise StopIteration def __iter__(self): @@ -194,6 +195,7 @@ return self._from_iterable(value for value in other if value in self) def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' for value in other: if value in self: return False @@ -259,6 +261,16 @@ class MutableSet(Set): + """A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ @abstractmethod def add(self, value): @@ -333,11 +345,20 @@ class Mapping(Sized, Iterable, Container): + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. 
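The docstring just added to Mapping is worth illustrating: a read-only mapping only has to implement __getitem__, __iter__ and __len__, and the mixin supplies get(), keys(), items(), values(), __contains__ and the comparisons. A minimal sketch (class name and data invented; on Python 3 the ABC lives in collections.abc):

    from collections import Mapping   # collections.abc.Mapping on Python 3

    class LowerDict(Mapping):
        # Read-only mapping whose keys compare case-insensitively.
        def __init__(self, data):
            self._data = dict((k.lower(), v) for k, v in data.items())
        def __getitem__(self, key):
            return self._data[key.lower()]
        def __iter__(self):
            return iter(self._data)
        def __len__(self):
            return len(self._data)

    d = LowerDict({'Answer': 42})
    assert d.get('ANSWER') == 42            # get() comes from the mixin
    assert d.get('missing', 'n/a') == 'n/a'
    assert 'answer' in d and len(d) == 1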
+ + """ + @abstractmethod def __getitem__(self, key): raise KeyError def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' try: return self[key] except KeyError: @@ -352,23 +373,29 @@ return True def iterkeys(self): + 'D.iterkeys() -> an iterator over the keys of D' return iter(self) def itervalues(self): + 'D.itervalues() -> an iterator over the values of D' for key in self: yield self[key] def iteritems(self): + 'D.iteritems() -> an iterator over the (key, value) items of D' for key in self: yield (key, self[key]) def keys(self): + "D.keys() -> list of D's keys" return list(self) def items(self): + "D.items() -> list of D's (key, value) pairs, as 2-tuples" return [(key, self[key]) for key in self] def values(self): + "D.values() -> list of D's values" return [self[key] for key in self] # Mappings are not hashable by default, but subclasses can change this @@ -443,6 +470,15 @@ class MutableMapping(Mapping): + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + + """ + @abstractmethod def __setitem__(self, key, value): raise KeyError @@ -454,6 +490,9 @@ __marker = object() def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + ''' try: value = self[key] except KeyError: @@ -465,6 +504,9 @@ return value def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' try: key = next(iter(self)) except StopIteration: @@ -474,6 +516,7 @@ return key, value def clear(self): + 'D.clear() -> None. Remove all items from D.' try: while True: self.popitem() @@ -481,6 +524,11 @@ pass def update(*args, **kwds): + ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' if len(args) > 2: raise TypeError("update() takes at most 2 positional " "arguments ({} given)".format(len(args))) @@ -502,6 +550,7 @@ self[key] = value def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' try: return self[key] except KeyError: @@ -546,12 +595,16 @@ yield self[i] def index(self, value): + '''S.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present. + ''' for i, v in enumerate(self): if v == value: return i raise ValueError def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' return sum(1 for v in self if v == value) Sequence.register(tuple) @@ -562,6 +615,13 @@ class MutableSequence(Sequence): + """All the operations on a read-only sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). 
+ + """ + @abstractmethod def __setitem__(self, index, value): raise IndexError @@ -572,26 +632,36 @@ @abstractmethod def insert(self, index, value): + 'S.insert(index, object) -- insert object before index' raise IndexError def append(self, value): + 'S.append(object) -- append object to the end of the sequence' self.insert(len(self), value) def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' n = len(self) for i in range(n//2): self[i], self[n-i-1] = self[n-i-1], self[i] def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' for v in values: self.append(v) def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' v = self[index] del self[index] return v def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + ''' del self[self.index(value)] def __iadd__(self, values): diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py new file mode 100644 --- /dev/null +++ b/lib-python/2.7/_osx_support.py @@ -0,0 +1,494 @@ +"""Shared OS X support functions.""" + +import os +import re +import sys + +__all__ = [ + 'compiler_fixup', + 'customize_config_vars', + 'customize_compiler', + 'get_platform_osx', +] + +# configuration variables that may contain universal build flags, +# like "-arch" or "-isdkroot", that may need customization for +# the user environment +_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', + 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', + 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', + 'PY_CORE_CFLAGS') + +# configuration variables that may contain compiler calls +_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX') + +# prefix added to original configuration variable names +_INITPRE = '_OSX_SUPPORT_INITIAL_' + + +def _find_executable(executable, path=None): + """Tries to find 'executable' in the directories listed in 'path'. + + A string listing directories separated by 'os.pathsep'; defaults to + os.environ['PATH']. Returns the complete filename or None if not found. + """ + if path is None: + path = os.environ['PATH'] + + paths = path.split(os.pathsep) + base, ext = os.path.splitext(executable) + + if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): + executable = executable + '.exe' + + if not os.path.isfile(executable): + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + # the file exists, we have a shot at spawn working + return f + return None + else: + return executable + + +def _read_output(commandstring): + """Output from successful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. + # tempfile is also not available then. 
+ import contextlib + try: + import tempfile + fp = tempfile.NamedTemporaryFile() + except ImportError: + fp = open("/tmp/_osx_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read().strip() if not os.system(cmd) else None + + +def _find_build_tool(toolname): + """Find a build tool on current path or using xcrun""" + return (_find_executable(toolname) + or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) + or '' + ) + +_SYSTEM_VERSION = None + +def _get_system_version(): + """Return the OS X system version as a string""" + # Reading this plist is a documented way to get the system + # version (see the documentation for the Gestalt Manager) + # We avoid using platform.mac_ver to avoid possible bootstrap issues during + # the build of Python itself (distutils is used to build standard library + # extensions). + + global _SYSTEM_VERSION + + if _SYSTEM_VERSION is None: + _SYSTEM_VERSION = '' + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) + finally: + f.close() + if m is not None: + _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + return _SYSTEM_VERSION + +def _remove_original_values(_config_vars): + """Remove original unmodified values for testing""" + # This is needed for higher-level cross-platform tests of get_platform. + for k in list(_config_vars): + if k.startswith(_INITPRE): + del _config_vars[k] + +def _save_modified_value(_config_vars, cv, newvalue): + """Save modified and original unmodified value of configuration var""" + + oldvalue = _config_vars.get(cv, '') + if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars): + _config_vars[_INITPRE + cv] = oldvalue + _config_vars[cv] = newvalue + +def _supports_universal_builds(): + """Returns True if universal builds are supported on this system""" + # As an approximation, we assume that if we are running on 10.4 or above, + # then we are running with an Xcode environment that supports universal + # builds, in particular -isysroot and -arch arguments to the compiler. This + # is in support of allowing 10.4 universal builds to run on 10.3.x systems. + + osx_version = _get_system_version() + if osx_version: + try: + osx_version = tuple(int(i) for i in osx_version.split('.')) + except ValueError: + osx_version = '' + return bool(osx_version >= (10, 4)) if osx_version else False + + +def _find_appropriate_compiler(_config_vars): + """Find appropriate C compiler for extension module builds""" + + # Issue #13590: + # The OSX location for the compiler varies between OSX + # (or rather Xcode) releases. With older releases (up-to 10.5) + # the compiler is in /usr/bin, with newer releases the compiler + # can only be found inside Xcode.app if the "Command Line Tools" + # are not installed. + # + # Futhermore, the compiler that can be used varies between + # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' + # as the compiler, after that 'clang' should be used because + # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that + # miscompiles Python. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + # The CC config var might contain additional arguments. + # Ignore them while searching. 
+ cc = oldcc = _config_vars['CC'].split()[0] + if not _find_executable(cc): + # Compiler is not found on the shell search PATH. + # Now search for clang, first on PATH (if the Command LIne + # Tools have been installed in / or if the user has provided + # another location via CC). If not found, try using xcrun + # to find an uninstalled clang (within a selected Xcode). + + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself (and os.popen is + # implemented on top of subprocess and is therefore not + # usable as well) + + cc = _find_build_tool('clang') + + elif os.path.basename(cc).startswith('gcc'): + # Compiler is GCC, check if it is LLVM-GCC + data = _read_output("'%s' --version" + % (cc.replace("'", "'\"'\"'"),)) + if 'llvm-gcc' in data: + # Found LLVM-GCC, fall back to clang + cc = _find_build_tool('clang') + + if not cc: + raise SystemError( + "Cannot locate working compiler") + + if cc != oldcc: + # Found a replacement compiler. + # Modify config vars using new compiler, if not already explicitly + # overriden by an env variable, preserving additional arguments. + for cv in _COMPILER_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + cv_split = _config_vars[cv].split() + cv_split[0] = cc if cv != 'CXX' else cc + '++' + _save_modified_value(_config_vars, cv, ' '.join(cv_split)) + + return _config_vars + + +def _remove_universal_flags(_config_vars): + """Remove all universal build arguments from config vars""" + + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _remove_unsupported_archs(_config_vars): + """Remove any unsupported archs from config vars""" + # Different Xcode releases support different sets for '-arch' + # flags. In particular, Xcode 4.x no longer supports the + # PPC architectures. + # + # This code automatically removes '-arch ppc' and '-arch ppc64' + # when these are not supported. That makes it possible to + # build extensions on OSX 10.7 and later with the prebuilt + # 32-bit installer on the python.org website. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself + status = os.system( + """echo 'int main{};' | """ + """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null""" + %(_config_vars['CC'].replace("'", "'\"'\"'"),)) + if status: + # The compile failed for some reason. Because of differences + # across Xcode and compiler versions, there is no reliable way + # to be sure why it failed. Assume here it was due to lack of + # PPC support and remove the related '-arch' flags from each + # config variables not explicitly overriden by an environment + # variable. If the error was for some other reason, we hope the + # failure will show up again when trying to compile an extension + # module. 
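The '-arch' surgery described in that comment comes down to a regular-expression substitution over each affected config variable. A self-contained sketch of the transformation (function name and flag strings are made up; the real code also skips variables overridden in the environment):

    import re

    def drop_arch(flags, arch):
        # Remove every '-arch <arch>' pair and tidy up the whitespace.
        flags = re.sub(r'-arch\s+%s(?!\S)\s*' % re.escape(arch), ' ', flags)
        return ' '.join(flags.split())

    cflags = '-arch i386 -arch ppc -arch ppc64 -O2'
    assert drop_arch(drop_arch(cflags, 'ppc'), 'ppc64') == '-arch i386 -O2'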
+ for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _override_all_archs(_config_vars): + """Allow override of all archs with ARCHFLAGS env var""" + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and '-arch' in _config_vars[cv]: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _check_for_unavailable_sdk(_config_vars): + """Remove references to any SDKs not available""" + # If we're on OSX 10.5 or later and the user tries to + # compile an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. This is particularly important with + # the standalone Command Line Tools alternative to a + # full-blown Xcode install since the CLT packages do not + # provide SDKs. If the SDK is not present, it is assumed + # that the header files and dev libs have been installed + # to /usr and /System/Library by either a standalone CLT + # package or the CLT component within Xcode. + cflags = _config_vars.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', cflags) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def compiler_fixup(compiler_so, cc_args): + """ + This function will strip '-isysroot PATH' and '-arch ARCH' from the + compile flags if the user has specified one them in extra_compile_flags. + + This is needed because '-arch ARCH' adds another architecture to the + build, without a way to remove an architecture. Furthermore GCC will + barf if multiple '-isysroot' arguments are present. + """ + stripArch = stripSysroot = False + + compiler_so = list(compiler_so) + + if not _supports_universal_builds(): + # OSX before 10.4.0, these don't support -arch and -isysroot at + # all. + stripArch = stripSysroot = True + else: + stripArch = '-arch' in cc_args + stripSysroot = '-isysroot' in cc_args + + if stripArch or 'ARCHFLAGS' in os.environ: + while True: + try: + index = compiler_so.index('-arch') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + if 'ARCHFLAGS' in os.environ and not stripArch: + # User specified different -arch flags in the environ, + # see also distutils.sysconfig + compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + + if stripSysroot: + while True: + try: + index = compiler_so.index('-isysroot') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + # Check if the SDK that is used during compilation actually exists, + # the universal build requires the usage of a universal SDK and not all + # users have that installed by default. 
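The SDK check sketched in that comment reduces to: pull the -isysroot argument out of CFLAGS and see whether the directory actually exists. Roughly (helper name invented; the real code then strips the flag from every config variable instead of just reporting it):

    import os
    import re

    def missing_sysroot(cflags):
        # Return the -isysroot path named in CFLAGS if it is not installed.
        m = re.search(r'-isysroot\s+(\S+)', cflags)
        if m is not None and not os.path.exists(m.group(1)):
            return m.group(1)
        return None

    assert missing_sysroot('-g -O2') is None
    # On a machine without that SDK installed,
    # missing_sysroot('-O2 -isysroot /Developer/SDKs/MacOSX10.4u.sdk')
    # would return the SDK path.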
+ sysroot = None + if '-isysroot' in cc_args: + idx = cc_args.index('-isysroot') + sysroot = cc_args[idx+1] + elif '-isysroot' in compiler_so: + idx = compiler_so.index('-isysroot') + sysroot = compiler_so[idx+1] + + if sysroot and not os.path.isdir(sysroot): + from distutils import log + log.warn("Compiling with an SDK that doesn't seem to exist: %s", + sysroot) + log.warn("Please check your Xcode installation") + + return compiler_so + + +def customize_config_vars(_config_vars): + """Customize Python build configuration variables. + + Called internally from sysconfig with a mutable mapping + containing name/value pairs parsed from the configured + makefile used to build this interpreter. Returns + the mapping updated as needed to reflect the environment + in which the interpreter is running; in the case of + a Python from a binary installer, the installed + environment may be very different from the build + environment, i.e. different OS levels, different + built tools, different available CPU architectures. + + This customization is performed whenever + distutils.sysconfig.get_config_vars() is first + called. It may be used in environments where no + compilers are present, i.e. when installing pure + Python dists. Customization of compiler paths + and detection of unavailable archs is deferred + until the first extension module build is + requested (in distutils.sysconfig.customize_compiler). + + Currently called from distutils.sysconfig + """ + + if not _supports_universal_builds(): + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + _remove_universal_flags(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + # Remove references to sdks that are not found + _check_for_unavailable_sdk(_config_vars) + + return _config_vars + + +def customize_compiler(_config_vars): + """Customize compiler path and configuration variables. + + This customization is performed when the first + extension module build is requested + in distutils.sysconfig.customize_compiler). + """ + + # Find a compiler to use for extension module builds + _find_appropriate_compiler(_config_vars) + + # Remove ppc arch flags if not supported here + _remove_unsupported_archs(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + return _config_vars + + +def get_platform_osx(_config_vars, osname, release, machine): + """Filter values for get_platform()""" + # called from get_platform() in sysconfig and distutils.util + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + + macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') + macrelease = _get_system_version() or macver + macver = macver or macrelease + + if macver: + release = macver + osname = "macosx" + + # Use the original CFLAGS value, if available, so that we + # return the same machine type for the platform string. + # Otherwise, distutils may consider this a cross-compiling + # case and disallow installs. + cflags = _config_vars.get(_INITPRE+'CFLAGS', + _config_vars.get('CFLAGS', '')) + if ((macrelease + '.') >= '10.4.' 
and + '-arch' in cflags.strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + + machine = 'fat' + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxint >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxint >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return (osname, release, machine) diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -298,7 +298,7 @@ def seek(self, pos, whence=0): """Change stream position. - Change the stream position to byte offset offset. offset is + Change the stream position to byte offset pos. Argument pos is interpreted relative to the position indicated by whence. Values for whence are: @@ -340,8 +340,10 @@ This method has no effect if the file is already closed. """ if not self.__closed: - self.flush() - self.__closed = True + try: + self.flush() + finally: + self.__closed = True def __del__(self): """Destructor. Calls close().""" @@ -883,12 +885,18 @@ return pos def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def writable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True @@ -1451,7 +1459,7 @@ enabled. With this enabled, on input, the lines endings '\n', '\r', or '\r\n' are translated to '\n' before being returned to the caller. Conversely, on output, '\n' is translated to the system - default line seperator, os.linesep. If newline is any other of its + default line separator, os.linesep. If newline is any other of its legal values, that newline becomes the newline when the file is read and it is returned untranslated. On output, '\n' is converted to the newline. @@ -1546,6 +1554,8 @@ return self._buffer def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return self._seekable def readable(self): @@ -1560,8 +1570,10 @@ def close(self): if self.buffer is not None and not self.closed: - self.flush() - self.buffer.close() + try: + self.flush() + finally: + self.buffer.close() @property def closed(self): diff --git a/lib-python/2.7/_strptime.py b/lib-python/2.7/_strptime.py --- a/lib-python/2.7/_strptime.py +++ b/lib-python/2.7/_strptime.py @@ -222,7 +222,7 @@ """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This - prevents the possibility of a match occuring for a value that also + prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). 
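The 'longest to shortest' rule in that _strptime docstring is easy to demonstrate: regex alternation is leftmost-first, so a short name listed early shadows any longer name that starts with it. A quick sketch (names chosen arbitrarily):

    import re

    names = ['Mar', 'March', 'May']

    naive = re.compile('|'.join(re.escape(n) for n in names))
    assert naive.match('March 1').group() == 'Mar'        # the prefix wins, wrongly

    ordered = sorted(names, key=len, reverse=True)        # longest first
    fixed = re.compile('|'.join(re.escape(n) for n in ordered))
    assert fixed.match('March 1').group() == 'March'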
@@ -326,7 +326,8 @@ if len(data_string) != found.end(): raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = 1900 + + year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 @@ -425,6 +426,12 @@ else: tz = value break + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian == -1 and week_of_year != -1 and weekday != -1: @@ -446,6 +453,12 @@ day = datetime_result.day if weekday == -1: weekday = datetime_date(year, month, day).weekday() + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. + year = 1900 + return (time.struct_time((year, month, day, hour, minute, second, weekday, julian, tz)), fraction) diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -63,7 +63,7 @@ yield item def __len__(self): - return sum(x() is not None for x in self.data) + return len(self.data) - len(self._pending_removals) def __contains__(self, item): try: @@ -116,36 +116,21 @@ def update(self, other): if self._pending_removals: self._commit_removals() - if isinstance(other, self.__class__): - self.data.update(other.data) - else: - for element in other: - self.add(element) + for element in other: + self.add(element) def __ior__(self, other): self.update(other) return self - # Helper functions for simple delegating methods. - def _apply(self, other, method): - if not isinstance(other, self.__class__): - other = self.__class__(other) - newdata = method(other.data) - newset = self.__class__() - newset.data = newdata + def difference(self, other): + newset = self.copy() + newset.difference_update(other) return newset - - def difference(self, other): - return self._apply(other, self.data.difference) __sub__ = difference def difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.difference_update(ref(item) for item in other) + self.__isub__(other) def __isub__(self, other): if self._pending_removals: self._commit_removals() @@ -156,13 +141,11 @@ return self def intersection(self, other): - return self._apply(other, self.data.intersection) + return self.__class__(item for item in other if item in self) __and__ = intersection def intersection_update(self, other): - if self._pending_removals: - self._commit_removals() - self.data.intersection_update(ref(item) for item in other) + self.__iand__(other) def __iand__(self, other): if self._pending_removals: self._commit_removals() @@ -171,45 +154,48 @@ def issubset(self, other): return self.data.issubset(ref(item) for item in other) - __lt__ = issubset + __le__ = issubset - def __le__(self, other): - return self.data <= set(ref(item) for item in other) + def __lt__(self, other): + return self.data < set(ref(item) for item in other) def issuperset(self, other): return self.data.issuperset(ref(item) for item in other) - __gt__ = issuperset + __ge__ = issuperset - def __ge__(self, other): - return self.data >= set(ref(item) for item in other) + def __gt__(self, other): + return self.data > set(ref(item) for item in other) def __eq__(self, 
other): if not isinstance(other, self.__class__): return NotImplemented return self.data == set(ref(item) for item in other) + def __ne__(self, other): + opposite = self.__eq__(other) + if opposite is NotImplemented: + return NotImplemented + return not opposite + def symmetric_difference(self, other): - return self._apply(other, self.data.symmetric_difference) + newset = self.copy() + newset.symmetric_difference_update(other) + return newset __xor__ = symmetric_difference def symmetric_difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.__ixor__(other) def __ixor__(self, other): if self._pending_removals: self._commit_removals() if self is other: self.data.clear() else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) return self def union(self, other): - return self._apply(other, self.data.union) + return self.__class__(e for s in (self, other) for e in s) __or__ = union def isdisjoint(self, other): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -123,7 +123,7 @@ compression type, and then write audio frames using writeframesraw. When all frames have been written, either call writeframes('') or close() to patch up the sizes in the header. -Marks can be added anytime. If there are any marks, ypu must call +Marks can be added anytime. If there are any marks, you must call close() after all frames have been written. The close() method is called automatically when the class instance is destroyed. @@ -480,31 +480,30 @@ pass else: self._convert = self._adpcm2lin - self._framesize = self._framesize // 4 + self._sampwidth = 2 return # for ULAW and ALAW try Compression Library try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._ulaw2lin - self._framesize = self._framesize // 2 + self._sampwidth = 2 return except ImportError: pass raise Error, 'cannot read compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - self._framesize = self._framesize // 2 - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW - self._framesize = self._framesize // 2 else: raise Error, 'unsupported compression type' self._decomp = cl.OpenDecompressor(scheme) self._convert = self._decomp_data + self._sampwidth = 2 else: self._comptype = 'NONE' self._compname = 'not compressed' @@ -655,7 +654,7 @@ def setcomptype(self, comptype, compname): if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self._comptype = comptype self._compname = compname @@ -675,7 +674,7 @@ nchannels, sampwidth, framerate, nframes, comptype, compname = info if self._nframeswritten: raise Error, 'cannot change parameters after starting to write' - if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'): + if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): raise Error, 'unsupported compression type' self.setnchannels(nchannels) self.setsampwidth(sampwidth) @@ -732,22 +731,28 @@ self._patchheader() def 
close(self): - self._ensure_header_written(0) - if self._datawritten & 1: - # quick pad to even size - self._file.write(chr(0)) - self._datawritten = self._datawritten + 1 - self._writemarkers() - if self._nframeswritten != self._nframes or \ - self._datalength != self._datawritten or \ - self._marklength: - self._patchheader() - if self._comp: - self._comp.CloseCompressor() - self._comp = None - # Prevent ref cycles - self._convert = None - self._file.close() + if self._file is None: + return + try: + self._ensure_header_written(0) + if self._datawritten & 1: + # quick pad to even size + self._file.write(chr(0)) + self._datawritten = self._datawritten + 1 + self._writemarkers() + if self._nframeswritten != self._nframes or \ + self._datalength != self._datawritten or \ + self._marklength: + self._patchheader() + if self._comp: + self._comp.CloseCompressor() + self._comp = None + finally: + # Prevent ref cycles + self._convert = None + f = self._file + self._file = None + f.close() # # Internal methods. @@ -798,7 +803,7 @@ try: import cl except ImportError: - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): try: import audioop self._convert = self._lin2ulaw @@ -806,9 +811,9 @@ except ImportError: pass raise Error, 'cannot write compressed AIFF-C files' - if self._comptype == 'ULAW': + if self._comptype in ('ULAW', 'ulaw'): scheme = cl.G711_ULAW - elif self._comptype == 'ALAW': + elif self._comptype in ('ALAW', 'alaw'): scheme = cl.G711_ALAW else: raise Error, 'unsupported compression type' @@ -861,7 +866,10 @@ _write_short(self._file, self._nchannels) self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) - _write_short(self._file, self._sampwidth * 8) + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): + _write_short(self._file, 8) + else: + _write_short(self._file, self._sampwidth * 8) _write_float(self._file, self._framerate) if self._aifc: self._file.write(self._comptype) @@ -947,23 +955,27 @@ sys.argv.append('/usr/demos/data/audio/bach.aiff') fn = sys.argv[1] f = open(fn, 'r') - print "Reading", fn - print "nchannels =", f.getnchannels() - print "nframes =", f.getnframes() - print "sampwidth =", f.getsampwidth() - print "framerate =", f.getframerate() - print "comptype =", f.getcomptype() - print "compname =", f.getcompname() - if sys.argv[2:]: - gn = sys.argv[2] - print "Writing", gn - g = open(gn, 'w') - g.setparams(f.getparams()) - while 1: - data = f.readframes(1024) - if not data: - break - g.writeframes(data) - g.close() + try: + print "Reading", fn + print "nchannels =", f.getnchannels() + print "nframes =", f.getnframes() + print "sampwidth =", f.getsampwidth() + print "framerate =", f.getframerate() + print "comptype =", f.getcomptype() + print "compname =", f.getcompname() + if sys.argv[2:]: + gn = sys.argv[2] + print "Writing", gn + g = open(gn, 'w') + try: + g.setparams(f.getparams()) + while 1: + data = f.readframes(1024) + if not data: + break + g.writeframes(data) + finally: + g.close() + print "Done." + finally: f.close() - print "Done." diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -740,10 +740,10 @@ - default -- The value to be produced if the option is not specified. - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. 
+ - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate @@ -1692,9 +1692,12 @@ return args def parse_known_args(self, args=None, namespace=None): - # args default to the system args if args is None: + # args default to the system args args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) # default Namespace built from parser defaults if namespace is None: @@ -1705,10 +1708,7 @@ if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: - default = action.default - if isinstance(action.default, basestring): - default = self._get_value(action, default) - setattr(namespace, action.dest, default) + setattr(namespace, action.dest, action.default) # add any parser defaults that aren't present for dest in self._defaults: @@ -1948,12 +1948,23 @@ if positionals: self.error(_('too few arguments')) - # make sure all required actions were present + # make sure all required actions were present, and convert defaults. for action in self._actions: - if action.required: - if action not in seen_actions: + if action not in seen_actions: + if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, basestring) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: @@ -1979,7 +1990,7 @@ for arg_string in arg_strings: # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content @@ -2186,9 +2197,12 @@ # Value conversion methods # ======================== def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' + # for everything but PARSER, REMAINDER args, strip out first '--' if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] + try: + arg_strings.remove('--') + except ValueError: + pass # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: diff --git a/lib-python/2.7/asyncore.py b/lib-python/2.7/asyncore.py --- a/lib-python/2.7/asyncore.py +++ b/lib-python/2.7/asyncore.py @@ -225,6 +225,7 @@ debug = False connected = False accepting = False + connecting = False closing = False addr = None ignore_log_types = frozenset(['warning']) @@ -248,7 +249,7 @@ try: self.addr = sock.getpeername() except socket.error, err: - if err.args[0] == ENOTCONN: + if err.args[0] in (ENOTCONN, EINVAL): # To handle the case where we got an unconnected # socket. 
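Back to the argparse hunk above: `type` is just a callable applied to the option's string, and with the reshuffled default handling a string default is only run through that callable when the option is absent from the command line, so a supplied value is not converted twice. A small sketch (option name invented):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--retries', type=int, default='3')    # string default

    assert parser.parse_args([]).retries == 3                   # default converted via int()
    assert parser.parse_args(['--retries', '7']).retries == 7   # given value converted once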
self.connected = False @@ -342,9 +343,11 @@ def connect(self, address): self.connected = False + self.connecting = True err = self.socket.connect_ex(address) if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ or err == EINVAL and os.name in ('nt', 'ce'): + self.addr = address return if err in (0, EISCONN): self.addr = address @@ -390,7 +393,7 @@ else: return data except socket.error, why: - # winsock sometimes throws ENOTCONN + # winsock sometimes raises ENOTCONN if why.args[0] in _DISCONNECTED: self.handle_close() return '' @@ -400,6 +403,7 @@ def close(self): self.connected = False self.accepting = False + self.connecting = False self.del_channel() try: self.socket.close() @@ -438,7 +442,8 @@ # sockets that are connected self.handle_accept() elif not self.connected: - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_read() else: self.handle_read() @@ -449,6 +454,7 @@ raise socket.error(err, _strerror(err)) self.handle_connect() self.connected = True + self.connecting = False def handle_write_event(self): if self.accepting: @@ -457,12 +463,8 @@ return if not self.connected: - #check for errors - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - raise socket.error(err, _strerror(err)) - - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_write() def handle_expt_event(self): diff --git a/lib-python/2.7/bdb.py b/lib-python/2.7/bdb.py --- a/lib-python/2.7/bdb.py +++ b/lib-python/2.7/bdb.py @@ -24,6 +24,7 @@ self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} + self.frame_returning = None def canonic(self, filename): if filename == "<" + filename[1:-1] + ">": @@ -82,7 +83,11 @@ def dispatch_return(self, frame, arg): if self.stop_here(frame) or frame == self.returnframe: - self.user_return(frame, arg) + try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None if self.quitting: raise BdbQuit return self.trace_dispatch @@ -186,6 +191,14 @@ def set_step(self): """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. + if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch self._set_stopinfo(None, None) def set_next(self, frame): diff --git a/lib-python/2.7/bsddb/__init__.py b/lib-python/2.7/bsddb/__init__.py --- a/lib-python/2.7/bsddb/__init__.py +++ b/lib-python/2.7/bsddb/__init__.py @@ -33,7 +33,7 @@ #---------------------------------------------------------------------- -"""Support for Berkeley DB 4.1 through 4.8 with a simple interface. +"""Support for Berkeley DB 4.3 through 5.3 with a simple interface. For the full featured object oriented interface use the bsddb.db module instead. It mirrors the Oracle Berkeley DB C API. @@ -138,7 +138,7 @@ except _bsddb.DBCursorClosedError: # the database was modified during iteration. abort. pass -# When Python 2.3 not supported in bsddb3, we can change this to "finally" +# When Python 2.4 not supported in bsddb3, we can change this to "finally" except : self._in_iter -= 1 raise @@ -181,7 +181,7 @@ except _bsddb.DBCursorClosedError: # the database was modified during iteration. abort. 
pass -# When Python 2.3 not supported in bsddb3, we can change this to "finally" +# When Python 2.4 not supported in bsddb3, we can change this to "finally" except : self._in_iter -= 1 raise diff --git a/lib-python/2.7/bsddb/dbobj.py b/lib-python/2.7/bsddb/dbobj.py --- a/lib-python/2.7/bsddb/dbobj.py +++ b/lib-python/2.7/bsddb/dbobj.py @@ -30,12 +30,7 @@ import db if sys.version_info < (2, 6) : - try: - from UserDict import DictMixin - except ImportError: - # DictMixin is new in Python 2.3 - class DictMixin: pass - MutableMapping = DictMixin + from UserDict import DictMixin as MutableMapping else : import collections MutableMapping = collections.MutableMapping @@ -196,6 +191,8 @@ return self._cobj.set_bt_compare(*args, **kwargs) def set_cachesize(self, *args, **kwargs): return self._cobj.set_cachesize(*args, **kwargs) + def set_dup_compare(self, *args, **kwargs) : + return self._cobj.set_dup_compare(*args, **kwargs) def set_flags(self, *args, **kwargs): return self._cobj.set_flags(*args, **kwargs) def set_h_ffactor(self, *args, **kwargs): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -43,7 +43,7 @@ if sys.version_info < (2, 6) : import cPickle else : - # When we drop support for python 2.3 and 2.4 + # When we drop support for python 2.4 # we could use: (in 2.5 we need a __future__ statement) # # with warnings.catch_warnings(): @@ -51,7 +51,7 @@ # ... # # We can not use "with" as is, because it would be invalid syntax - # in python 2.3, 2.4 and (with no __future__) 2.5. + # in python 2.4 and (with no __future__) 2.5. # Here we simulate "with" following PEP 343 : import warnings w = warnings.catch_warnings() @@ -65,32 +65,12 @@ w.__exit__() del w -#At version 2.3 cPickle switched to using protocol instead of bin -if sys.version_info >= (2, 3): - HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL -# In python 2.3.*, "cPickle.dumps" accepts no -# named parameters. "pickle.dumps" accepts them, -# so this seems a bug. - if sys.version_info < (2, 4): - def _dumps(object, protocol): - return cPickle.dumps(object, protocol) - else : - def _dumps(object, protocol): - return cPickle.dumps(object, protocol=protocol) - -else: - HIGHEST_PROTOCOL = None - def _dumps(object, protocol): - return cPickle.dumps(object, bin=protocol) - +HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL +def _dumps(object, protocol): + return cPickle.dumps(object, protocol=protocol) if sys.version_info < (2, 6) : - try: - from UserDict import DictMixin - except ImportError: - # DictMixin is new in Python 2.3 - class DictMixin: pass - MutableMapping = DictMixin + from UserDict import DictMixin as MutableMapping else : import collections MutableMapping = collections.MutableMapping diff --git a/lib-python/2.7/bsddb/dbtables.py b/lib-python/2.7/bsddb/dbtables.py --- a/lib-python/2.7/bsddb/dbtables.py +++ b/lib-python/2.7/bsddb/dbtables.py @@ -30,7 +30,7 @@ if sys.version_info < (2, 6) : import cPickle as pickle else : - # When we drop support for python 2.3 and 2.4 + # When we drop support for python 2.4 # we could use: (in 2.5 we need a __future__ statement) # # with warnings.catch_warnings(): @@ -38,7 +38,7 @@ # ... # # We can not use "with" as is, because it would be invalid syntax - # in python 2.3, 2.4 and (with no __future__) 2.5. + # in python 2.4 and (with no __future__) 2.5. 
# Here we simulate "with" following PEP 343 : import warnings w = warnings.catch_warnings() diff --git a/lib-python/2.7/bsddb/test/test_all.py b/lib-python/2.7/bsddb/test/test_all.py --- a/lib-python/2.7/bsddb/test/test_all.py +++ b/lib-python/2.7/bsddb/test/test_all.py @@ -392,10 +392,8 @@ return self._dbenv.get_tmp_dir().decode(charset) def get_data_dirs(self) : - # Have to use a list comprehension and not - # generators, because we are supporting Python 2.3. return tuple( - [i.decode(charset) for i in self._dbenv.get_data_dirs()]) + (i.decode(charset) for i in self._dbenv.get_data_dirs())) class DBSequence_py3k(object) : def __init__(self, db, *args, **kwargs) : @@ -484,6 +482,8 @@ print '-=' * 38 print db.DB_VERSION_STRING print 'bsddb.db.version(): %s' % (db.version(), ) + if db.version() >= (5, 0) : + print 'bsddb.db.full_version(): %s' %repr(db.full_version()) print 'bsddb.db.__version__: %s' % db.__version__ print 'bsddb.db.cvsid: %s' % db.cvsid @@ -528,7 +528,8 @@ # This path can be overriden via "set_test_path_prefix()". import os, os.path -get_new_path.prefix=os.path.join(os.sep,"tmp","z-Berkeley_DB") +get_new_path.prefix=os.path.join(os.environ.get("TMPDIR", + os.path.join(os.sep,"tmp")), "z-Berkeley_DB") get_new_path.num=0 def get_test_path_prefix() : diff --git a/lib-python/2.7/bsddb/test/test_basics.py b/lib-python/2.7/bsddb/test/test_basics.py --- a/lib-python/2.7/bsddb/test/test_basics.py +++ b/lib-python/2.7/bsddb/test/test_basics.py @@ -9,6 +9,7 @@ from pprint import pprint import unittest import time +import sys from test_all import db, test_support, verbose, get_new_environment_path, \ get_new_database_path @@ -44,13 +45,6 @@ _numKeys = 1002 # PRIVATE. NOTE: must be an even value - import sys - if sys.version_info < (2, 4): - def assertTrue(self, expr, msg=None): - self.failUnless(expr,msg=msg) - def assertFalse(self, expr, msg=None): - self.failIf(expr,msg=msg) - def setUp(self): if self.useEnv: self.homeDir=get_new_environment_path() @@ -74,14 +68,13 @@ # create and open the DB self.d = db.DB(self.env) if not self.useEnv : - if db.version() >= (4, 2) : - self.d.set_cachesize(*self.cachesize) - cachesize = self.d.get_cachesize() - self.assertEqual(cachesize[0], self.cachesize[0]) - self.assertEqual(cachesize[2], self.cachesize[2]) - # Berkeley DB expands the cache 25% accounting overhead, - # if the cache is small. - self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1])) + self.d.set_cachesize(*self.cachesize) + cachesize = self.d.get_cachesize() + self.assertEqual(cachesize[0], self.cachesize[0]) + self.assertEqual(cachesize[2], self.cachesize[2]) + # Berkeley DB expands the cache 25% accounting overhead, + # if the cache is small. 
+ self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1])) self.d.set_flags(self.dbsetflags) if self.dbname: self.d.open(self.filename, self.dbname, self.dbtype, @@ -161,7 +154,6 @@ try: d.delete('abcd') except db.DBNotFoundError, val: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_NOTFOUND) else : @@ -184,7 +176,6 @@ try: d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE) except db.DBKeyExistError, val: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_KEYEXIST) else : @@ -338,7 +329,6 @@ rec = c.next() except db.DBNotFoundError, val: if get_raises_error: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_NOTFOUND) else : @@ -363,7 +353,6 @@ rec = c.prev() except db.DBNotFoundError, val: if get_raises_error: - import sys if sys.version_info < (2, 6) : self.assertEqual(val[0], db.DB_NOTFOUND) else : @@ -390,7 +379,6 @@ try: n = c.set('bad key') except db.DBNotFoundError, val: - import sys if sys.version_info < (2, 6) : From noreply at buildbot.pypy.org Thu Mar 13 00:21:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 00:21:06 +0100 (CET) Subject: [pypy-commit] pypy vendor/stdlib: update the Python 3 stdlib to v3.2.5 Message-ID: <20140312232106.2F82B1C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: vendor/stdlib Changeset: r69915:bd484963d7ae Date: 2014-03-12 15:34 -0700 http://bitbucket.org/pypy/pypy/changeset/bd484963d7ae/ Log: update the Python 3 stdlib to v3.2.5 diff too long, truncating to 2000 out of 27236 lines diff --git a/lib-python/3/__future__.py b/lib-python/3/__future__.py --- a/lib-python/3/__future__.py +++ b/lib-python/3/__future__.py @@ -114,7 +114,7 @@ CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), - (2, 7, 0, "alpha", 0), + (3, 0, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), diff --git a/lib-python/3/_abcoll.py b/lib-python/3/_abcoll.py --- a/lib-python/3/_abcoll.py +++ b/lib-python/3/_abcoll.py @@ -184,12 +184,12 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return other.__lt__(self) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + return other.__le__(self) def __eq__(self, other): if not isinstance(other, Set): diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py new file mode 100644 --- /dev/null +++ b/lib-python/3/_osx_support.py @@ -0,0 +1,488 @@ +"""Shared OS X support functions.""" + +import os +import re +import sys + +__all__ = [ + 'compiler_fixup', + 'customize_config_vars', + 'customize_compiler', + 'get_platform_osx', +] + +# configuration variables that may contain universal build flags, +# like "-arch" or "-isdkroot", that may need customization for +# the user environment +_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', + 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', + 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', + 'PY_CORE_CFLAGS') + +# configuration variables that may contain compiler calls +_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX') + +# prefix added to original configuration variable names +_INITPRE = '_OSX_SUPPORT_INITIAL_' + + +def _find_executable(executable, path=None): + """Tries to find 'executable' in the directories listed in 'path'. + + A string listing directories separated by 'os.pathsep'; defaults to + os.environ['PATH']. Returns the complete filename or None if not found. 
+ """ + if path is None: + path = os.environ['PATH'] + + paths = path.split(os.pathsep) + base, ext = os.path.splitext(executable) + + if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): + executable = executable + '.exe' + + if not os.path.isfile(executable): + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + # the file exists, we have a shot at spawn working + return f + return None + else: + return executable + + +def _read_output(commandstring): + """Output from succesful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. + # tempfile is also not available then. + import contextlib + try: + import tempfile + fp = tempfile.NamedTemporaryFile() + except ImportError: + fp = open("/tmp/_osx_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read().decode('utf-8').strip() if not os.system(cmd) else None + + +def _find_build_tool(toolname): + """Find a build tool on current path or using xcrun""" + return (_find_executable(toolname) + or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) + or '' + ) + +_SYSTEM_VERSION = None + +def _get_system_version(): + """Return the OS X system version as a string""" + # Reading this plist is a documented way to get the system + # version (see the documentation for the Gestalt Manager) + # We avoid using platform.mac_ver to avoid possible bootstrap issues during + # the build of Python itself (distutils is used to build standard library + # extensions). + + global _SYSTEM_VERSION + + if _SYSTEM_VERSION is None: + _SYSTEM_VERSION = '' + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) + finally: + f.close() + if m is not None: + _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + return _SYSTEM_VERSION + +def _remove_original_values(_config_vars): + """Remove original unmodified values for testing""" + # This is needed for higher-level cross-platform tests of get_platform. + for k in list(_config_vars): + if k.startswith(_INITPRE): + del _config_vars[k] + +def _save_modified_value(_config_vars, cv, newvalue): + """Save modified and original unmodified value of configuration var""" + + oldvalue = _config_vars.get(cv, '') + if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars): + _config_vars[_INITPRE + cv] = oldvalue + _config_vars[cv] = newvalue + +def _supports_universal_builds(): + """Returns True if universal builds are supported on this system""" + # As an approximation, we assume that if we are running on 10.4 or above, + # then we are running with an Xcode environment that supports universal + # builds, in particular -isysroot and -arch arguments to the compiler. This + # is in support of allowing 10.4 universal builds to run on 10.3.x systems. 
+ + osx_version = _get_system_version() + if osx_version: + try: + osx_version = tuple(int(i) for i in osx_version.split('.')) + except ValueError: + osx_version = '' + return bool(osx_version >= (10, 4)) if osx_version else False + + +def _find_appropriate_compiler(_config_vars): + """Find appropriate C compiler for extension module builds""" + + # Issue #13590: + # The OSX location for the compiler varies between OSX + # (or rather Xcode) releases. With older releases (up-to 10.5) + # the compiler is in /usr/bin, with newer releases the compiler + # can only be found inside Xcode.app if the "Command Line Tools" + # are not installed. + # + # Futhermore, the compiler that can be used varies between + # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' + # as the compiler, after that 'clang' should be used because + # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that + # miscompiles Python. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + # The CC config var might contain additional arguments. + # Ignore them while searching. + cc = oldcc = _config_vars['CC'].split()[0] + if not _find_executable(cc): + # Compiler is not found on the shell search PATH. + # Now search for clang, first on PATH (if the Command LIne + # Tools have been installed in / or if the user has provided + # another location via CC). If not found, try using xcrun + # to find an uninstalled clang (within a selected Xcode). + + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself (and os.popen is + # implemented on top of subprocess and is therefore not + # usable as well) + + cc = _find_build_tool('clang') + + elif os.path.basename(cc).startswith('gcc'): + # Compiler is GCC, check if it is LLVM-GCC + data = _read_output("'%s' --version" + % (cc.replace("'", "'\"'\"'"),)) + if 'llvm-gcc' in data: + # Found LLVM-GCC, fall back to clang + cc = _find_build_tool('clang') + + if not cc: + raise SystemError( + "Cannot locate working compiler") + + if cc != oldcc: + # Found a replacement compiler. + # Modify config vars using new compiler, if not already explictly + # overriden by an env variable, preserving additional arguments. + for cv in _COMPILER_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + cv_split = _config_vars[cv].split() + cv_split[0] = cc if cv != 'CXX' else cc + '++' + _save_modified_value(_config_vars, cv, ' '.join(cv_split)) + + return _config_vars + + +def _remove_universal_flags(_config_vars): + """Remove all universal build arguments from config vars""" + + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _remove_unsupported_archs(_config_vars): + """Remove any unsupported archs from config vars""" + # Different Xcode releases support different sets for '-arch' + # flags. In particular, Xcode 4.x no longer supports the + # PPC architectures. + # + # This code automatically removes '-arch ppc' and '-arch ppc64' + # when these are not supported. That makes it possible to + # build extensions on OSX 10.7 and later with the prebuilt + # 32-bit installer on the python.org website. 
+ + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself + status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%( + _config_vars['CC'].replace("'", "'\"'\"'"),)) + # The Apple compiler drivers return status 255 if no PPC + if (status >> 8) == 255: + # Compiler doesn't support PPC, remove the related + # '-arch' flags if not explicitly overridden by an + # environment variable + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _override_all_archs(_config_vars): + """Allow override of all archs with ARCHFLAGS env var""" + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and '-arch' in _config_vars[cv]: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _check_for_unavailable_sdk(_config_vars): + """Remove references to any SDKs not available""" + # If we're on OSX 10.5 or later and the user tries to + # compile an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. This is particularly important with + # the standalong Command Line Tools alternative to a + # full-blown Xcode install since the CLT packages do not + # provide SDKs. If the SDK is not present, it is assumed + # that the header files and dev libs have been installed + # to /usr and /System/Library by either a standalone CLT + # package or the CLT component within Xcode. + cflags = _config_vars.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', cflags) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def compiler_fixup(compiler_so, cc_args): + """ + This function will strip '-isysroot PATH' and '-arch ARCH' from the + compile flags if the user has specified one them in extra_compile_flags. + + This is needed because '-arch ARCH' adds another architecture to the + build, without a way to remove an architecture. Furthermore GCC will + barf if multiple '-isysroot' arguments are present. + """ + stripArch = stripSysroot = False + + compiler_so = list(compiler_so) + + if not _supports_universal_builds(): + # OSX before 10.4.0, these don't support -arch and -isysroot at + # all. 
+ stripArch = stripSysroot = True + else: + stripArch = '-arch' in cc_args + stripSysroot = '-isysroot' in cc_args + + if stripArch or 'ARCHFLAGS' in os.environ: + while True: + try: + index = compiler_so.index('-arch') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + if 'ARCHFLAGS' in os.environ and not stripArch: + # User specified different -arch flags in the environ, + # see also distutils.sysconfig + compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + + if stripSysroot: + while True: + try: + index = compiler_so.index('-isysroot') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + # Check if the SDK that is used during compilation actually exists, + # the universal build requires the usage of a universal SDK and not all + # users have that installed by default. + sysroot = None + if '-isysroot' in cc_args: + idx = cc_args.index('-isysroot') + sysroot = cc_args[idx+1] + elif '-isysroot' in compiler_so: + idx = compiler_so.index('-isysroot') + sysroot = compiler_so[idx+1] + + if sysroot and not os.path.isdir(sysroot): + from distutils import log + log.warn("Compiling with an SDK that doesn't seem to exist: %s", + sysroot) + log.warn("Please check your Xcode installation") + + return compiler_so + + +def customize_config_vars(_config_vars): + """Customize Python build configuration variables. + + Called internally from sysconfig with a mutable mapping + containing name/value pairs parsed from the configured + makefile used to build this interpreter. Returns + the mapping updated as needed to reflect the environment + in which the interpreter is running; in the case of + a Python from a binary installer, the installed + environment may be very different from the build + environment, i.e. different OS levels, different + built tools, different available CPU architectures. + + This customization is performed whenever + distutils.sysconfig.get_config_vars() is first + called. It may be used in environments where no + compilers are present, i.e. when installing pure + Python dists. Customization of compiler paths + and detection of unavailable archs is deferred + until the first extention module build is + requested (in distutils.sysconfig.customize_compiler). + + Currently called from distutils.sysconfig + """ + + if not _supports_universal_builds(): + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + _remove_universal_flags(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + # Remove references to sdks that are not found + _check_for_unavailable_sdk(_config_vars) + + return _config_vars + + +def customize_compiler(_config_vars): + """Customize compiler path and configuration variables. + + This customization is performed when the first + extension module build is requested + in distutils.sysconfig.customize_compiler). 
+ """ + + # Find a compiler to use for extension module builds + _find_appropriate_compiler(_config_vars) + + # Remove ppc arch flags if not supported here + _remove_unsupported_archs(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + return _config_vars + + +def get_platform_osx(_config_vars, osname, release, machine): + """Filter values for get_platform()""" + # called from get_platform() in sysconfig and distutils.util + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + + macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') + macrelease = _get_system_version() or macver + macver = macver or macrelease + + if macver: + release = macver + osname = "macosx" + + # Use the original CFLAGS value, if available, so that we + # return the same machine type for the platform string. + # Otherwise, distutils may consider this a cross-compiling + # case and disallow installs. + cflags = _config_vars.get(_INITPRE+'CFLAGS', + _config_vars.get('CFLAGS', '')) + if ((macrelease + '.') >= '10.4.' and + '-arch' in cflags.strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + + machine = 'fat' + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return (osname, release, machine) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -298,7 +298,7 @@ def seek(self, pos, whence=0): """Change stream position. - Change the stream position to byte offset offset. offset is + Change the stream position to byte offset pos. Argument pos is interpreted relative to the position indicated by whence. 
Values for whence are ints: @@ -889,12 +889,18 @@ return pos def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def writable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True @@ -1567,6 +1573,8 @@ return self._buffer def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return self._seekable def readable(self): diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -339,7 +339,7 @@ raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = 1900 + year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 @@ -444,6 +444,12 @@ else: tz = value break + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian == -1 and week_of_year != -1 and weekday != -1: @@ -472,6 +478,12 @@ else: gmtoff = None + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. + year = 1900 + return (year, month, day, hour, minute, second, weekday, julian, tz, gmtoff, tzname), fraction diff --git a/lib-python/3/_weakrefset.py b/lib-python/3/_weakrefset.py --- a/lib-python/3/_weakrefset.py +++ b/lib-python/3/_weakrefset.py @@ -63,7 +63,7 @@ yield item def __len__(self): - return sum(x() is not None for x in self.data) + return len(self.data) - len(self._pending_removals) def __contains__(self, item): try: @@ -114,36 +114,21 @@ def update(self, other): if self._pending_removals: self._commit_removals() - if isinstance(other, self.__class__): - self.data.update(other.data) - else: - for element in other: - self.add(element) + for element in other: + self.add(element) def __ior__(self, other): self.update(other) return self - # Helper functions for simple delegating methods. 
- def _apply(self, other, method): - if not isinstance(other, self.__class__): - other = self.__class__(other) - newdata = method(other.data) - newset = self.__class__() - newset.data = newdata + def difference(self, other): + newset = self.copy() + newset.difference_update(other) return newset - - def difference(self, other): - return self._apply(other, self.data.difference) __sub__ = difference def difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.difference_update(ref(item) for item in other) + self.__isub__(other) def __isub__(self, other): if self._pending_removals: self._commit_removals() @@ -154,13 +139,11 @@ return self def intersection(self, other): - return self._apply(other, self.data.intersection) + return self.__class__(item for item in other if item in self) __and__ = intersection def intersection_update(self, other): - if self._pending_removals: - self._commit_removals() - self.data.intersection_update(ref(item) for item in other) + self.__iand__(other) def __iand__(self, other): if self._pending_removals: self._commit_removals() @@ -169,17 +152,17 @@ def issubset(self, other): return self.data.issubset(ref(item) for item in other) - __lt__ = issubset + __le__ = issubset - def __le__(self, other): - return self.data <= set(ref(item) for item in other) + def __lt__(self, other): + return self.data < set(ref(item) for item in other) def issuperset(self, other): return self.data.issuperset(ref(item) for item in other) - __gt__ = issuperset + __ge__ = issuperset - def __ge__(self, other): - return self.data >= set(ref(item) for item in other) + def __gt__(self, other): + return self.data > set(ref(item) for item in other) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -187,27 +170,24 @@ return self.data == set(ref(item) for item in other) def symmetric_difference(self, other): - return self._apply(other, self.data.symmetric_difference) + newset = self.copy() + newset.symmetric_difference_update(other) + return newset __xor__ = symmetric_difference def symmetric_difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.__ixor__(other) def __ixor__(self, other): if self._pending_removals: self._commit_removals() if self is other: self.data.clear() else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) return self def union(self, other): - return self._apply(other, self.data.union) + return self.__class__(e for s in (self, other) for e in s) __or__ = union def isdisjoint(self, other): diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -692,7 +692,9 @@ self._patchheader() def close(self): - if self._file: + if self._file is None: + return + try: self._ensure_header_written(0) if self._datawritten & 1: # quick pad to even size @@ -703,10 +705,12 @@ self._datalength != self._datawritten or \ self._marklength: self._patchheader() + finally: # Prevent ref cycles self._convert = None - self._file.close() + f = self._file self._file = None + f.close() # # Internal methods. 
diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -736,10 +736,10 @@ - default -- The value to be produced if the option is not specified. - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. + - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate @@ -1701,9 +1701,12 @@ return args def parse_known_args(self, args=None, namespace=None): - # args default to the system args if args is None: + # args default to the system args args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) # default Namespace built from parser defaults if namespace is None: @@ -1714,10 +1717,7 @@ if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: - default = action.default - if isinstance(action.default, str): - default = self._get_value(action, default) - setattr(namespace, action.dest, default) + setattr(namespace, action.dest, action.default) # add any parser defaults that aren't present for dest in self._defaults: @@ -1945,12 +1945,23 @@ if positionals: self.error(_('too few arguments')) - # make sure all required actions were present + # make sure all required actions were present, and convert defaults. for action in self._actions: - if action.required: - if action not in seen_actions: + if action not in seen_actions: + if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: @@ -1976,7 +1987,7 @@ for arg_string in arg_strings: # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content @@ -2186,9 +2197,12 @@ # Value conversion methods # ======================== def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' + # for everything but PARSER, REMAINDER args, strip out first '--' if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] + try: + arg_strings.remove('--') + except ValueError: + pass # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: diff --git a/lib-python/3/asyncore.py b/lib-python/3/asyncore.py --- a/lib-python/3/asyncore.py +++ b/lib-python/3/asyncore.py @@ -225,6 +225,7 @@ debug = False connected = False 
accepting = False + connecting = False closing = False addr = None ignore_log_types = frozenset(['warning']) @@ -248,7 +249,7 @@ try: self.addr = sock.getpeername() except socket.error as err: - if err.args[0] == ENOTCONN: + if err.args[0] in (ENOTCONN, EINVAL): # To handle the case where we got an unconnected # socket. self.connected = False @@ -342,9 +343,11 @@ def connect(self, address): self.connected = False + self.connecting = True err = self.socket.connect_ex(address) if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ or err == EINVAL and os.name in ('nt', 'ce'): + self.addr = address return if err in (0, EISCONN): self.addr = address @@ -390,7 +393,7 @@ else: return data except socket.error as why: - # winsock sometimes throws ENOTCONN + # winsock sometimes raises ENOTCONN if why.args[0] in _DISCONNECTED: self.handle_close() return b'' @@ -400,6 +403,7 @@ def close(self): self.connected = False self.accepting = False + self.connecting = False self.del_channel() try: self.socket.close() @@ -438,7 +442,8 @@ # sockets that are connected self.handle_accept() elif not self.connected: - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_read() else: self.handle_read() @@ -449,6 +454,7 @@ raise socket.error(err, _strerror(err)) self.handle_connect() self.connected = True + self.connecting = False def handle_write_event(self): if self.accepting: @@ -457,12 +463,8 @@ return if not self.connected: - #check for errors - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - raise socket.error(err, _strerror(err)) - - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_write() def handle_expt_event(self): diff --git a/lib-python/3/bdb.py b/lib-python/3/bdb.py --- a/lib-python/3/bdb.py +++ b/lib-python/3/bdb.py @@ -22,6 +22,7 @@ self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} + self.frame_returning = None def canonic(self, filename): if filename == "<" + filename[1:-1] + ">": @@ -80,7 +81,11 @@ def dispatch_return(self, frame, arg): if self.stop_here(frame) or frame == self.returnframe: - self.user_return(frame, arg) + try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None if self.quitting: raise BdbQuit return self.trace_dispatch @@ -186,6 +191,14 @@ def set_step(self): """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. 
+ if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch self._set_stopinfo(None, None) def set_next(self, frame): diff --git a/lib-python/3/calendar.py b/lib-python/3/calendar.py --- a/lib-python/3/calendar.py +++ b/lib-python/3/calendar.py @@ -161,7 +161,11 @@ oneday = datetime.timedelta(days=1) while True: yield date - date += oneday + try: + date += oneday + except OverflowError: + # Adding one day could fail after datetime.MAXYEAR + break if date.month != month and date.weekday() == self.firstweekday: break diff --git a/lib-python/3/cgi.py b/lib-python/3/cgi.py --- a/lib-python/3/cgi.py +++ b/lib-python/3/cgi.py @@ -214,17 +214,17 @@ """ import http.client - boundary = "" + boundary = b"" if 'boundary' in pdict: boundary = pdict['boundary'] if not valid_boundary(boundary): raise ValueError('Invalid boundary in multipart form: %r' % (boundary,)) - nextpart = "--" + boundary - lastpart = "--" + boundary + "--" + nextpart = b"--" + boundary + lastpart = b"--" + boundary + b"--" partdict = {} - terminator = "" + terminator = b"" while terminator != lastpart: bytes = -1 @@ -243,7 +243,7 @@ raise ValueError('Maximum content length exceeded') data = fp.read(bytes) else: - data = "" + data = b"" # Read lines until end of part. lines = [] while 1: @@ -251,7 +251,7 @@ if not line: terminator = lastpart # End outer loop break - if line.startswith("--"): + if line.startswith(b"--"): terminator = line.rstrip() if terminator in (nextpart, lastpart): break @@ -263,12 +263,12 @@ if lines: # Strip final line terminator line = lines[-1] - if line[-2:] == "\r\n": + if line[-2:] == b"\r\n": line = line[:-2] - elif line[-1:] == "\n": + elif line[-1:] == b"\n": line = line[:-1] lines[-1] = line - data = "".join(lines) + data = b"".join(lines) line = headers['content-disposition'] if not line: continue diff --git a/lib-python/3/cgitb.py b/lib-python/3/cgitb.py --- a/lib-python/3/cgitb.py +++ b/lib-python/3/cgitb.py @@ -293,14 +293,19 @@ if self.logdir is not None: suffix = ['.txt', '.html'][self.format=="html"] (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) + try: file = os.fdopen(fd, 'w') file.write(doc) file.close() - msg = '

<p> %s contains the description of this error.' % path + msg = '%s contains the description of this error.' % path except: - msg = '<p> Tried to save traceback to %s, but failed.' % path - self.file.write(msg + '\n') + msg = 'Tried to save traceback to %s, but failed.' % path + + if self.format == 'html': + self.file.write('<p>%s</p>
\n' % msg) + else: + self.file.write(msg + '\n') try: self.file.flush() except: pass diff --git a/lib-python/3/collections.py b/lib-python/3/collections.py --- a/lib-python/3/collections.py +++ b/lib-python/3/collections.py @@ -281,6 +281,10 @@ 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + return None + {field_defs} ''' diff --git a/lib-python/3/concurrent/futures/_base.py b/lib-python/3/concurrent/futures/_base.py --- a/lib-python/3/concurrent/futures/_base.py +++ b/lib-python/3/concurrent/futures/_base.py @@ -112,12 +112,14 @@ def __init__(self, num_pending_calls, stop_on_exception): self.num_pending_calls = num_pending_calls self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() super().__init__() def _decrement_pending_calls(self): - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() def add_result(self, future): super().add_result(future) @@ -517,7 +519,7 @@ """Returns a iterator equivalent to map(fn, iter). Args: - fn: A callable that will take take as many arguments as there are + fn: A callable that will take as many arguments as there are passed iterables. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -99,10 +99,9 @@ yes, on for True). Returns False or True. items(section=_UNSET, raw=False, vars=None) - If section is given, return a list of tuples with (section_name, - section_proxy) for each section, including DEFAULTSECT. Otherwise, - return a list of tuples with (name, value) for each option - in the section. + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. remove_section(section) Remove the given file section and all its options. @@ -852,6 +851,19 @@ value_getter = lambda option: d[option] return [(option, value_getter(option)) for option in d.keys()] + def popitem(self): + """Remove a section from the parser and return it as + a (section_name, section_proxy) tuple. If no section is present, raise + KeyError. + + The section DEFAULT is never returned because it cannot be removed. + """ + for key in self.sections(): + value = self[key] + del self[key] + return key, value + raise KeyError + def optionxform(self, optionstr): return optionstr.lower() @@ -947,7 +959,8 @@ # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. 
- self.remove_section(key) + if key in self._sections: + self._sections[key].clear() self.read_dict({key: value}) def __delitem__(self, key): diff --git a/lib-python/3/ctypes/test/test_bitfields.py b/lib-python/3/ctypes/test/test_bitfields.py --- a/lib-python/3/ctypes/test/test_bitfields.py +++ b/lib-python/3/ctypes/test/test_bitfields.py @@ -240,5 +240,25 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] + @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + def test_uint32(self): + class X(Structure): + _fields_ = [("a", c_uint32, 32)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFDCBA987 + self.assertEqual(x.a, 0xFDCBA987) + + @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + def test_uint64(self): + class X(Structure): + _fields_ = [("a", c_uint64, 64)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFEDCBA9876543211 + self.assertEqual(x.a, 0xFEDCBA9876543211) + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_numbers.py b/lib-python/3/ctypes/test/test_numbers.py --- a/lib-python/3/ctypes/test/test_numbers.py +++ b/lib-python/3/ctypes/test/test_numbers.py @@ -217,6 +217,16 @@ # probably be changed: self.assertRaises(TypeError, c_int, c_long(42)) + def test_float_overflow(self): + import sys + big_int = int(sys.float_info.max) * 2 + for t in float_types + [c_longdouble]: + self.assertRaises(OverflowError, t, big_int) + if (hasattr(t, "__ctype_be__")): + self.assertRaises(OverflowError, t.__ctype_be__, big_int) + if (hasattr(t, "__ctype_le__")): + self.assertRaises(OverflowError, t.__ctype_le__, big_int) + ## def test_perf(self): ## check_perf() diff --git a/lib-python/3/ctypes/test/test_returnfuncptrs.py b/lib-python/3/ctypes/test/test_returnfuncptrs.py --- a/lib-python/3/ctypes/test/test_returnfuncptrs.py +++ b/lib-python/3/ctypes/test/test_returnfuncptrs.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +import os import _ctypes_test @@ -33,5 +34,34 @@ self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) self.assertRaises(TypeError, strchr, b"abcdef") + def test_from_dll(self): + dll = CDLL(_ctypes_test.__file__) + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(("my_strchr", dll)) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + + # Issue 6083: Reference counting bug + def test_from_dll_refcount(self): + class BadSequence(tuple): + def __getitem__(self, key): + if key == 0: + return "my_strchr" + if key == 1: + return CDLL(_ctypes_test.__file__) + raise IndexError + + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)( + BadSequence(("my_strchr", CDLL(_ctypes_test.__file__)))) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_structures.py b/lib-python/3/ctypes/test/test_structures.py --- a/lib-python/3/ctypes/test/test_structures.py +++ b/lib-python/3/ctypes/test/test_structures.py @@ -1,6 +1,7 @@ import unittest from ctypes import * from struct import 
calcsize +import _testcapi class SubclassesTest(unittest.TestCase): def test_subclass(self): @@ -199,6 +200,14 @@ "_pack_": -1} self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + # Issue 15989 + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.INT_MAX + 1} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.UINT_MAX + 2} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + def test_initializers(self): class Person(Structure): _fields_ = [("name", c_char*6), diff --git a/lib-python/3/ctypes/test/test_win32.py b/lib-python/3/ctypes/test/test_win32.py --- a/lib-python/3/ctypes/test/test_win32.py +++ b/lib-python/3/ctypes/test/test_win32.py @@ -3,6 +3,7 @@ from ctypes import * from ctypes.test import is_resource_enabled import unittest, sys +from test import support import _ctypes_test @@ -60,7 +61,9 @@ def test_COMError(self): from _ctypes import COMError - self.assertEqual(COMError.__doc__, "Raised when a COM method call failed.") + if support.HAVE_DOCSTRINGS: + self.assertEqual(COMError.__doc__, + "Raised when a COM method call failed.") ex = COMError(-1, "text", ("details",)) self.assertEqual(ex.hresult, -1) diff --git a/lib-python/3/curses/__init__.py b/lib-python/3/curses/__init__.py --- a/lib-python/3/curses/__init__.py +++ b/lib-python/3/curses/__init__.py @@ -5,7 +5,7 @@ import curses from curses import textpad - curses.initwin() + curses.initscr() ... """ diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1821,6 +1821,8 @@ return (self._offset, self._name) def __eq__(self, other): + if type(other) != timezone: + return False return self._offset == other._offset def __hash__(self): diff --git a/lib-python/3/decimal.py b/lib-python/3/decimal.py --- a/lib-python/3/decimal.py +++ b/lib-python/3/decimal.py @@ -1555,7 +1555,13 @@ def __float__(self): """Float representation.""" - return float(str(self)) + if self._isnan(): + if self.is_snan(): + raise ValueError("Cannot convert signaling NaN to float") + s = "-nan" if self._sign else "nan" + else: + s = str(self) + return float(s) def __int__(self): """Converts self to an int, truncating if necessary.""" diff --git a/lib-python/3/distutils/__init__.py b/lib-python/3/distutils/__init__.py --- a/lib-python/3/distutils/__init__.py +++ b/lib-python/3/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. 
# #--start constants-- -__version__ = "3.2.3" +__version__ = "3.2.5" #--end constants-- diff --git a/lib-python/3/distutils/command/bdist_rpm.py b/lib-python/3/distutils/command/bdist_rpm.py --- a/lib-python/3/distutils/command/bdist_rpm.py +++ b/lib-python/3/distutils/command/bdist_rpm.py @@ -3,7 +3,7 @@ Implements the Distutils 'bdist_rpm' command (create RPM source and binary distributions).""" -import sys, os +import subprocess, sys, os from distutils.core import Command from distutils.debug import DEBUG from distutils.util import get_platform @@ -190,7 +190,7 @@ if self.fix_python: self.python = sys.executable else: - self.python = "python" + self.python = "python3" elif self.fix_python: raise DistutilsOptionError( "--python and --fix-python are mutually exclusive options") @@ -320,6 +320,7 @@ rpm_cmd.append('-bb') else: rpm_cmd.append('-ba') + rpm_cmd.extend(['--define', '__python %s' % self.python]) if self.rpm3_mode: rpm_cmd.extend(['--define', '_topdir %s' % os.path.abspath(self.rpm_base)]) @@ -405,6 +406,21 @@ 'Summary: ' + self.distribution.get_description(), ] + # Workaround for #14443 which affects some RPM based systems such as + # RHEL6 (and probably derivatives) + vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}') + # Generate a potential replacement value for __os_install_post (whilst + # normalizing the whitespace to simplify the test for whether the + # invocation of brp-python-bytecompile passes in __python): + vendor_hook = '\n'.join([' %s \\' % line.strip() + for line in vendor_hook.splitlines()]) + problem = "brp-python-bytecompile \\\n" + fixed = "brp-python-bytecompile %{__python} \\\n" + fixed_hook = vendor_hook.replace(problem, fixed) + if fixed_hook != vendor_hook: + spec_file.append('# Workaround for http://bugs.python.org/issue14443') + spec_file.append('%define __os_install_post ' + fixed_hook + '\n') + # put locale summaries into spec file # XXX not supported for now (hard to put a dictionary # in a config file -- arg!) 
diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -667,10 +667,10 @@ if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') + ext_suffix = get_config_var('EXT_SUFFIX') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext - return os.path.join(*ext_path) + so_ext + return os.path.join(*ext_path) + '_d' + ext_suffix + return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): """Return the list of symbols that a shared extension has to diff --git a/lib-python/3/distutils/command/check.py b/lib-python/3/distutils/command/check.py --- a/lib-python/3/distutils/command/check.py +++ b/lib-python/3/distutils/command/check.py @@ -23,6 +23,9 @@ def system_message(self, level, message, *children, **kwargs): self.messages.append((level, message, children, kwargs)) + return nodes.system_message(message, level=level, + type=self.levels[level], + *children, **kwargs) HAS_DOCUTILS = True except Exception: diff --git a/lib-python/3/distutils/command/install.py b/lib-python/3/distutils/command/install.py --- a/lib-python/3/distutils/command/install.py +++ b/lib-python/3/distutils/command/install.py @@ -278,8 +278,8 @@ if self.user and (self.prefix or self.exec_prefix or self.home or self.install_base or self.install_platbase): - raise DistutilsOptionError("can't combine user with with prefix/" - "exec_prefix/home or install_(plat)base") + raise DistutilsOptionError("can't combine user with prefix, " + "exec_prefix/home, or install_(plat)base") # Next, stuff that's wrong (or dubious) only on certain platforms. if os.name != "posix": diff --git a/lib-python/3/distutils/command/upload.py b/lib-python/3/distutils/command/upload.py --- a/lib-python/3/distutils/command/upload.py +++ b/lib-python/3/distutils/command/upload.py @@ -125,7 +125,7 @@ if self.sign: data['gpg_signature'] = (os.path.basename(filename) + ".asc", - open(filename+".asc").read()) + open(filename+".asc", "rb").read()) # set up the authentication user_pass = (self.username + ":" + self.password).encode('ascii') diff --git a/lib-python/3/distutils/config.py b/lib-python/3/distutils/config.py --- a/lib-python/3/distutils/config.py +++ b/lib-python/3/distutils/config.py @@ -4,7 +4,6 @@ that uses .pypirc in the distutils.command package. 
""" import os -import sys from configparser import ConfigParser from distutils.cmd import Command @@ -43,16 +42,8 @@ def _store_pypirc(self, username, password): """Creates a default .pypirc file.""" rc = self._get_rc_file() - f = open(rc, 'w') - try: + with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: f.write(DEFAULT_PYPIRC % (username, password)) - finally: - f.close() - try: - os.chmod(rc, 0o600) - except OSError: - # should do something better here - pass def _read_pypirc(self): """Reads the .pypirc file.""" diff --git a/lib-python/3/distutils/dir_util.py b/lib-python/3/distutils/dir_util.py --- a/lib-python/3/distutils/dir_util.py +++ b/lib-python/3/distutils/dir_util.py @@ -141,6 +141,10 @@ src_name = os.path.join(src, n) dst_name = os.path.join(dst, n) + if n.startswith('.nfs'): + # skip NFS rename files + continue + if preserve_symlinks and os.path.islink(src_name): link_dest = os.readlink(src_name) if verbose >= 1: diff --git a/lib-python/3/distutils/sysconfig.py b/lib-python/3/distutils/sysconfig.py --- a/lib-python/3/distutils/sysconfig.py +++ b/lib-python/3/distutils/sysconfig.py @@ -146,7 +146,7 @@ "I don't know where Python installs its library " "on platform '%s'" % os.name) -_USE_CLANG = None + def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. @@ -155,42 +155,28 @@ varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": - (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. + global _config_vars + if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' + + (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') newcc = None if 'CC' in os.environ: - newcc = os.environ['CC'] - elif sys.platform == 'darwin' and cc == 'gcc-4.2': - # Issue #13590: - # Since Apple removed gcc-4.2 in Xcode 4.2, we can no - # longer assume it is available for extension module builds. - # If Python was built with gcc-4.2, check first to see if - # it is available on this system; if not, try to use clang - # instead unless the caller explicitly set CC. - global _USE_CLANG - if _USE_CLANG is None: - from distutils import log - from subprocess import Popen, PIPE - p = Popen("! 
type gcc-4.2 && type clang && exit 2", - shell=True, stdout=PIPE, stderr=PIPE) - p.wait() - if p.returncode == 2: - _USE_CLANG = True - log.warn("gcc-4.2 not found, using clang instead") - else: - _USE_CLANG = False - if _USE_CLANG: - newcc = 'clang' - if newcc: - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - ldshared = newcc + ldshared[len(cc):] - cc = newcc + cc = os.environ['CC'] if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: @@ -225,7 +211,7 @@ linker_exe=cc, archiver=archiver) - compiler.shared_lib_extension = so_ext + compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): @@ -480,6 +466,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) @@ -499,6 +486,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" global _config_vars @@ -543,43 +531,11 @@ srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _config_vars[key] = flags - - else: - - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. 
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _config_vars[key] = flags + import _osx_support + _osx_support.customize_config_vars(_config_vars) if args: vals = [] diff --git a/lib-python/3/distutils/tests/test_bdist_dumb.py b/lib-python/3/distutils/tests/test_bdist_dumb.py --- a/lib-python/3/distutils/tests/test_bdist_dumb.py +++ b/lib-python/3/distutils/tests/test_bdist_dumb.py @@ -88,9 +88,9 @@ fp.close() contents = sorted(os.path.basename(fn) for fn in contents) - wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], - 'foo.%s.pyc' % imp.get_tag(), - 'foo.py'] + wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py'] + if not sys.dont_write_bytecode: + wanted.append('foo.%s.pyc' % imp.get_tag()) self.assertEqual(contents, sorted(wanted)) def test_suite(): diff --git a/lib-python/3/distutils/tests/test_bdist_msi.py b/lib-python/3/distutils/tests/test_bdist_msi.py --- a/lib-python/3/distutils/tests/test_bdist_msi.py +++ b/lib-python/3/distutils/tests/test_bdist_msi.py @@ -1,12 +1,11 @@ """Tests for distutils.command.bdist_msi.""" +import sys import unittest -import sys - from test.support import run_unittest - from distutils.tests import support - at unittest.skipUnless(sys.platform=="win32", "These tests are only for win32") + + at unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows') class BDistMSITestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): @@ -14,10 +13,11 @@ def test_minimal(self): # minimal test XXX need more tests from distutils.command.bdist_msi import bdist_msi - pkg_pth, dist = self.create_dist() + project_dir, dist = self.create_dist() cmd = bdist_msi(dist) cmd.ensure_finalized() + def test_suite(): return unittest.makeSuite(BDistMSITestCase) diff --git a/lib-python/3/distutils/tests/test_build_ext.py b/lib-python/3/distutils/tests/test_build_ext.py --- a/lib-python/3/distutils/tests/test_build_ext.py +++ b/lib-python/3/distutils/tests/test_build_ext.py @@ -73,8 +73,9 @@ self.assertEqual(xx.foo(2, 5), 7) self.assertEqual(xx.foo(13,15), 28) self.assertEqual(xx.new().demo(), None) - doc = 'This is a template module just for instruction.' - self.assertEqual(xx.__doc__, doc) + if support.HAVE_DOCSTRINGS: + doc = 'This is a template module just for instruction.' 
+ self.assertEqual(xx.__doc__, doc) self.assertTrue(isinstance(xx.Null(), xx.Null)) self.assertTrue(isinstance(xx.Str(), xx.Str)) @@ -317,8 +318,8 @@ finally: os.chdir(old_wd) self.assertTrue(os.path.exists(so_file)) - so_ext = sysconfig.get_config_var('SO') - self.assertTrue(so_file.endswith(so_ext)) + ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, other_tmp_dir) @@ -327,7 +328,7 @@ cmd.run() so_file = cmd.get_outputs()[0] self.assertTrue(os.path.exists(so_file)) - self.assertTrue(so_file.endswith(so_ext)) + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, cmd.build_lib) @@ -354,7 +355,7 @@ self.assertEqual(lastdir, 'bar') def test_ext_fullpath(self): - ext = sysconfig.get_config_vars()['SO'] + ext = sysconfig.get_config_var('EXT_SUFFIX') # building lxml.etree inplace #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c') #etree_ext = Extension('lxml.etree', [etree_c]) diff --git a/lib-python/3/distutils/tests/test_dir_util.py b/lib-python/3/distutils/tests/test_dir_util.py --- a/lib-python/3/distutils/tests/test_dir_util.py +++ b/lib-python/3/distutils/tests/test_dir_util.py @@ -76,7 +76,6 @@ remove_tree(self.root_target, verbose=0) - def test_copy_tree_verbosity(self): mkpath(self.target, verbose=0) @@ -88,11 +87,8 @@ mkpath(self.target, verbose=0) a_file = os.path.join(self.target, 'ok.txt') - f = open(a_file, 'w') - try: + with open(a_file, 'w') as f: f.write('some content') - finally: - f.close() wanted = ['copying %s -> %s' % (a_file, self.target2)] copy_tree(self.target, self.target2, verbose=1) @@ -101,6 +97,21 @@ remove_tree(self.root_target, verbose=0) remove_tree(self.target2, verbose=0) + def test_copy_tree_skips_nfs_temp_files(self): + mkpath(self.target, verbose=0) + + a_file = os.path.join(self.target, 'ok.txt') + nfs_file = os.path.join(self.target, '.nfs123abc') + for f in a_file, nfs_file: + with open(f, 'w') as fh: + fh.write('some content') + + copy_tree(self.target, self.target2) + self.assertEqual(os.listdir(self.target2), ['ok.txt']) + + remove_tree(self.root_target, verbose=0) + remove_tree(self.target2, verbose=0) + def test_ensure_relative(self): if os.sep == '/': self.assertEqual(ensure_relative('/home/foo'), 'home/foo') diff --git a/lib-python/3/distutils/tests/test_install.py b/lib-python/3/distutils/tests/test_install.py --- a/lib-python/3/distutils/tests/test_install.py +++ b/lib-python/3/distutils/tests/test_install.py @@ -23,7 +23,7 @@ def _make_ext_name(modname): if os.name == 'nt' and sys.executable.endswith('_d.exe'): modname += '_d' - return modname + sysconfig.get_config_var('SO') + return modname + sysconfig.get_config_var('EXT_SUFFIX') class InstallTestCase(support.TempdirManager, @@ -165,7 +165,7 @@ cmd.home = 'home' self.assertRaises(DistutilsOptionError, cmd.finalize_options) - # can't combine user with with prefix/exec_prefix/home or + # can't combine user with prefix/exec_prefix/home or # install_(plat)base cmd.prefix = None cmd.user = 'user' diff --git a/lib-python/3/distutils/tests/test_msvc9compiler.py b/lib-python/3/distutils/tests/test_msvc9compiler.py --- a/lib-python/3/distutils/tests/test_msvc9compiler.py +++ b/lib-python/3/distutils/tests/test_msvc9compiler.py @@ -104,7 +104,7 @@ unittest.TestCase): def test_no_compiler(self): - # makes sure query_vcvarsall throws + # makes sure query_vcvarsall raises # a DistutilsPlatformError if the compiler # is not found from 
distutils.msvc9compiler import query_vcvarsall diff --git a/lib-python/3/distutils/tests/test_register.py b/lib-python/3/distutils/tests/test_register.py --- a/lib-python/3/distutils/tests/test_register.py +++ b/lib-python/3/distutils/tests/test_register.py @@ -1,5 +1,4 @@ """Tests for distutils.command.register.""" -import sys import os import unittest import getpass @@ -10,11 +9,14 @@ from distutils.command import register as register_module from distutils.command.register import register -from distutils.core import Distribution from distutils.errors import DistutilsSetupError -from distutils.tests import support -from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase +from distutils.tests.test_config import PyPIRCCommandTestCase + +try: + import docutils +except ImportError: + docutils = None PYPIRC_NOPASSWORD = """\ [distutils] @@ -193,6 +195,7 @@ self.assertEqual(headers['Content-length'], '290') self.assertTrue((b'tarek') in req.data) + @unittest.skipUnless(docutils is not None, 'needs docutils') def test_strict(self): # testing the script option # when on, the register command stops if @@ -205,13 +208,6 @@ cmd.strict = 1 self.assertRaises(DistutilsSetupError, cmd.run) - # we don't test the reSt feature if docutils - # is not installed - try: - import docutils - except ImportError: - return - # metadata are OK but long_description is broken metadata = {'url': 'xxx', 'author': 'xxx', 'author_email': 'éxéxé', @@ -265,6 +261,22 @@ finally: del register_module.input + @unittest.skipUnless(docutils is not None, 'needs docutils') + def test_register_invalid_long_description(self): + description = ':funkie:`str`' # mimic Sphinx-specific markup + metadata = {'url': 'xxx', 'author': 'xxx', + 'author_email': 'xxx', + 'name': 'xxx', 'version': 'xxx', + 'long_description': description} + cmd = self._get_cmd(metadata) + cmd.ensure_finalized() + cmd.strict = True + inputs = Inputs('2', 'tarek', 'tarek at ziade.org') + register_module.input = inputs + self.addCleanup(delattr, register_module, 'input') + + self.assertRaises(DistutilsSetupError, cmd.run) + def test_check_metadata_deprecated(self): # makes sure make_metadata is deprecated cmd = self._get_cmd() diff --git a/lib-python/3/distutils/tests/test_sdist.py b/lib-python/3/distutils/tests/test_sdist.py --- a/lib-python/3/distutils/tests/test_sdist.py +++ b/lib-python/3/distutils/tests/test_sdist.py @@ -6,6 +6,7 @@ import zipfile from os.path import join from textwrap import dedent +from test.support import captured_stdout, check_warnings, run_unittest try: import zlib @@ -13,7 +14,6 @@ except ImportError: ZLIB_SUPPORT = False -from test.support import captured_stdout, check_warnings, run_unittest from distutils.command.sdist import sdist, show_formats from distutils.core import Distribution @@ -83,9 +83,8 @@ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') def test_prune_file_list(self): - # this test creates a package with some vcs dirs in it - # and launch sdist to make sure they get pruned - # on all systems + # this test creates a project with some VCS dirs and an NFS rename + # file, then launches sdist to check they get pruned on all systems # creating VCS directories with some files in them os.mkdir(join(self.tmp_dir, 'somecode', '.svn')) @@ -99,6 +98,8 @@ self.write_file((self.tmp_dir, 'somecode', '.git', 'ok'), 'xxx') + self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx') + # now building a sdist dist, cmd = self.get_cmd() @@ -326,6 +327,7 @@ # filling data_files by pointing files in package_data 
dist.package_data = {'somecode': ['*.txt']} self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#') + cmd.formats = ['gztar'] cmd.ensure_finalized() cmd.run() diff --git a/lib-python/3/distutils/tests/test_sysconfig.py b/lib-python/3/distutils/tests/test_sysconfig.py --- a/lib-python/3/distutils/tests/test_sysconfig.py +++ b/lib-python/3/distutils/tests/test_sysconfig.py @@ -102,7 +102,27 @@ import sysconfig as global_sysconfig self.assertEqual(global_sysconfig.get_config_var('CFLAGS'), sysconfig.get_config_var('CFLAGS')) self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'), sysconfig.get_config_var('LDFLAGS')) - self.assertEqual(global_sysconfig.get_config_var('LDSHARED'),sysconfig.get_config_var('LDSHARED')) + + @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'),'compiler flags customized') + def test_sysconfig_compiler_vars(self): + # On OS X, binary installers support extension module building on + # various levels of the operating system with differing Xcode + # configurations. This requires customization of some of the + # compiler configuration directives to suit the environment on + # the installed machine. Some of these customizations may require + # running external programs and, so, are deferred until needed by + # the first extension module build. With Python 3.3, only + # the Distutils version of sysconfig is used for extension module + # builds, which happens earlier in the Distutils tests. This may + # cause the following tests to fail since no tests have caused + # the global version of sysconfig to call the customization yet. + # The solution for now is to simply skip this test in this case. + # The longer-term solution is to only have one version of sysconfig. + + import sysconfig as global_sysconfig + if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'): + return + self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), sysconfig.get_config_var('LDSHARED')) self.assertEqual(global_sysconfig.get_config_var('CC'), sysconfig.get_config_var('CC')) diff --git a/lib-python/3/distutils/tests/test_util.py b/lib-python/3/distutils/tests/test_util.py --- a/lib-python/3/distutils/tests/test_util.py +++ b/lib-python/3/distutils/tests/test_util.py @@ -13,6 +13,7 @@ from distutils.sysconfig import get_config_vars from distutils import sysconfig from distutils.tests import support +import _osx_support class UtilTestCase(support.EnvironGuard, unittest.TestCase): @@ -92,6 +93,7 @@ ('Darwin Kernel Version 8.11.1: ' 'Wed Oct 10 18:23:28 PDT 2007; ' 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386')) + _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3' get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g ' @@ -105,6 +107,7 @@ sys.maxsize = cursize From noreply at buildbot.pypy.org Thu Mar 13 02:26:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 02:26:03 +0100 (CET) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140313012603.AA2C21C3627@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69918:2d98701f2735 Date: 2014-03-12 18:23 -0700 http://bitbucket.org/pypy/pypy/changeset/2d98701f2735/ Log: py3k compat diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -350,7 +350,7 @@ r_sample = getsample(cp, size, i + 1) sample = (l_sample * fac1) + (r_sample * fac2) - sample = clip(sample) + sample = int(clip(sample)) _put_sample(result, size, i // 2, sample) @@ -501,7 +501,7 @@ 
# slice off extra bytes trim_index = (out_i * bytes_per_frame) - len(retval) - retval = _buffer(retval)[:trim_index] + retval = retval[:trim_index] return (retval, (d, tuple(samps))) From noreply at buildbot.pypy.org Thu Mar 13 02:26:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 02:26:05 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140313012605.205621C3627@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69919:50d426596929 Date: 2014-03-12 18:24 -0700 http://bitbucket.org/pypy/pypy/changeset/50d426596929/ Log: merge default diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -349,7 +349,7 @@ r_sample = getsample(cp, size, i + 1) sample = (l_sample * fac1) + (r_sample * fac2) - sample = clip(sample) + sample = int(clip(sample)) _put_sample(result, size, i // 2, sample) @@ -500,7 +500,7 @@ # slice off extra bytes trim_index = (out_i * bytes_per_frame) - len(retval) - retval = _buffer(retval)[:trim_index] + retval = retval[:trim_index] return (retval, (d, tuple(samps))) From noreply at buildbot.pypy.org Thu Mar 13 02:26:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 02:26:06 +0100 (CET) Subject: [pypy-commit] pypy py3k: port skips from default Message-ID: <20140313012606.6F2771C3627@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69920:9e5eb8da8506 Date: 2014-03-12 18:25 -0700 http://bitbucket.org/pypy/pypy/changeset/9e5eb8da8506/ Log: port skips from default diff --git a/lib-python/3/test/test_audioop.py b/lib-python/3/test/test_audioop.py --- a/lib-python/3/test/test_audioop.py +++ b/lib-python/3/test/test_audioop.py @@ -1,6 +1,6 @@ import audioop import unittest -from test.support import run_unittest +from test.support import run_unittest, impl_detail endian = 'big' if audioop.getsample(b'\0\1', 2, 0) == 1 else 'little' @@ -93,21 +93,25 @@ wtd = len(d2)//3 self.assertEqual(len(audioop.lin2lin(d1, got, wtd)), len(d2)) + @impl_detail(pypy=False) def test_adpcm2lin(self): # Very cursory test self.assertEqual(audioop.adpcm2lin(b'\0\0', 1, None), (b'\0' * 4, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 2, None), (b'\0' * 8, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 4, None), (b'\0' * 16, (0,0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): # Very cursory test self.assertEqual(audioop.lin2adpcm(b'\0\0\0\0', 1, None), (b'\0\0', (0,0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(data[0], 1), b'\xd5\xc5\xf5') self.assertEqual(audioop.lin2alaw(data[1], 2), b'\xd5\xd5\xd5') self.assertEqual(audioop.lin2alaw(data[2], 4), b'\xd5\xd5\xd5') + @impl_detail(pypy=False) def test_alaw2lin(self): # Cursory d = audioop.lin2alaw(data[0], 1) @@ -123,11 +127,13 @@ self.assertEqual(audioop.alaw2lin(d, 4), b'\x00\x00\x08\x00\x00\x00\x08\x01\x00\x00\x10\x02') + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(data[0], 1), b'\xff\xe7\xdb') self.assertEqual(audioop.lin2ulaw(data[1], 2), b'\xff\xff\xff') self.assertEqual(audioop.lin2ulaw(data[2], 4), b'\xff\xff\xff') + @impl_detail(pypy=False) def test_ulaw2lin(self): # Cursory d = audioop.lin2ulaw(data[0], 1) @@ -197,6 +203,7 @@ self.assertRaises(audioop.error, audioop.findmax, ''.join(chr(x) for x in range(256)), -2392392) + @impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -221,6 +228,7 @@ 
self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abc' state = None From noreply at buildbot.pypy.org Thu Mar 13 03:49:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 03:49:42 +0100 (CET) Subject: [pypy-commit] pypy py3k: add int.__ceil/floor__, cleanup Message-ID: <20140313024942.C32141C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69921:a0911b1c0cb1 Date: 2014-03-12 19:38 -0700 http://bitbucket.org/pypy/pypy/changeset/a0911b1c0cb1/ Log: add int.__ceil/floor__, cleanup diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -177,11 +177,25 @@ _, w_r = space.fixedview(w_tuple, 2) return space.sub(self, w_r) - def _int(self, space): - return self.int(space) + def _self_unaryop(opname, doc=None): + @func_renamer('descr_' + opname) + def descr_unaryop(self, space): + return self.int(space) + descr_unaryop.__doc__ = doc + return descr_unaryop - descr_get_numerator = func_with_new_name(_int, 'descr_get_numerator') - descr_get_real = func_with_new_name(_int, 'descr_get_real') + descr_conjugate = _self_unaryop( + 'conjugate', "Returns self, the complex conjugate of any int.") + descr_pos = _self_unaryop('pos', "x.__pos__() <==> +x") + descr_index = _self_unaryop('index', + "x[y:z] <==> x[y.__index__():z.__index__()]") + descr_trunc = _self_unaryop('trunc', + "Truncating an Integral returns itself.") + descr_floor = _self_unaryop('floor', "Flooring an Integral returns itself.") + descr_ceil = _self_unaryop('ceil', "Ceiling of an Integral returns itself.") + + descr_get_numerator = _self_unaryop('get_numerator') + descr_get_real = _self_unaryop('get_real') def descr_get_denominator(self, space): return wrapint(space, 1) @@ -217,8 +231,6 @@ descr_repr = _abstract_unaryop('repr') descr_str = _abstract_unaryop('str') - descr_conjugate = _abstract_unaryop( - 'conjugate', "Returns self, the complex conjugate of any int.") descr_bit_length = _abstract_unaryop('bit_length', """\ int.bit_length() -> int @@ -229,14 +241,7 @@ 6""") descr_hash = _abstract_unaryop('hash') descr_getnewargs = _abstract_unaryop('getnewargs', None) - - descr_index = _abstract_unaryop( - 'index', "x[y:z] <==> x[y.__index__():z.__index__()]") - descr_trunc = _abstract_unaryop('trunc', - "Truncating an Integral returns itself.") descr_float = _abstract_unaryop('float') - - descr_pos = _abstract_unaryop('pos', "x.__pos__() <==> +x") descr_neg = _abstract_unaryop('neg', "x.__neg__() <==> -x") descr_abs = _abstract_unaryop('abs') descr_bool = _abstract_unaryop('bool', "x.__bool__() <==> x != 0") @@ -531,14 +536,6 @@ x = intmask(intmask(x) * sign) return wrapint(space, -2 if x == -1 else x) - def _int(self, space): - return self.int(space) - - descr_pos = func_with_new_name(_int, 'descr_pos') - descr_index = func_with_new_name(_int, 'descr_index') - descr_trunc = func_with_new_name(_int, 'descr_trunc') - descr_conjugate = func_with_new_name(_int, 'descr_conjugate') - def as_w_long(self, space): # XXX: should try smalllong from pypy.objspace.std.longobject import W_LongObject @@ -990,6 +987,8 @@ __abs__ = interpindirect2app(W_AbstractIntObject.descr_abs), __bool__ = interpindirect2app(W_AbstractIntObject.descr_bool), __invert__ = interpindirect2app(W_AbstractIntObject.descr_invert), + __floor__ = 
interpindirect2app(W_AbstractIntObject.descr_floor), + __ceil__ = interpindirect2app(W_AbstractIntObject.descr_ceil), __lt__ = interpindirect2app(W_AbstractIntObject.descr_lt), __le__ = interpindirect2app(W_AbstractIntObject.descr_le), diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -43,9 +43,6 @@ def descr_getnewargs(self, space): return space.newtuple([newlong(space, self.asbigint())]) - def descr_conjugate(self, space): - return self.int(space) - def descr_bit_length(self, space): bigint = space.bigint_w(self) try: @@ -164,8 +161,6 @@ def __repr__(self): return '' % self.num.tolong() - descr_index = descr_trunc = descr_pos = int - def descr_float(self, space): return space.newfloat(self.tofloat(space)) diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -79,8 +79,6 @@ return W_LongObject(self.num) return W_Root.int(self, space) - descr_index = descr_trunc = descr_pos = int - def descr_float(self, space): return space.newfloat(float(self.longlong)) diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -551,6 +551,12 @@ assert ns['a'] == 9007199254740991.0 assert ns['b'] == 9007199254740991.0 + def test_ceil(self): + assert 8 .__ceil__() == 8 + + def test_floor(self): + assert 8 .__floor__() == 8 + class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} From noreply at buildbot.pypy.org Thu Mar 13 03:49:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 03:49:44 +0100 (CET) Subject: [pypy-commit] pypy py3k: workaround lack of space.hex/oct on py3k Message-ID: <20140313024944.2AFC51C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69922:1366c85a0748 Date: 2014-03-12 19:38 -0700 http://bitbucket.org/pypy/pypy/changeset/1366c85a0748/ Log: workaround lack of space.hex/oct on py3k diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1043,7 +1043,7 @@ raise oefmt(space.w_TypeError, "don't know how to convert scalar number to hex") value = self.get_scalar_value() - return space.hex(value) + return space.call_method(space.builtin, 'hex', value) def descr_oct(self, space): if self.get_size() != 1: @@ -1053,7 +1053,7 @@ raise oefmt(space.w_TypeError, "don't know how to convert scalar number to oct") value = self.get_scalar_value() - return space.oct(value) + return space.call_method(space.builtin, 'oct', value) def descr_index(self, space): if self.get_size() != 1 or \ From noreply at buildbot.pypy.org Thu Mar 13 03:49:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 03:49:45 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill int.__div__ Message-ID: <20140313024945.6914C1C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69923:0c391272a085 Date: 2014-03-12 19:47 -0700 http://bitbucket.org/pypy/pypy/changeset/0c391272a085/ Log: kill int.__div__ diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -288,7 +288,6 @@ descr_rshift, descr_rrshift = _abstract_binop('rshift') 
descr_floordiv, descr_rfloordiv = _abstract_binop('floordiv') - descr_div, descr_rdiv = _abstract_binop('div') descr_truediv, descr_rtruediv = _abstract_binop('truediv') descr_mod, descr_rmod = _abstract_binop('mod') descr_divmod, descr_rdivmod = _abstract_binop('divmod') @@ -301,7 +300,6 @@ raise oefmt(space.w_ZeroDivisionError, "integer division or modulo by zero") return wrapint(space, z) -_div = func_with_new_name(_floordiv, '_div') def _truediv(space, x, y): @@ -760,7 +758,6 @@ descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) - descr_div, descr_rdiv = _make_descr_binop(_div) descr_truediv, descr_rtruediv = _make_descr_binop(_truediv) descr_mod, descr_rmod = _make_descr_binop(_mod) descr_divmod, descr_rdivmod = _make_descr_binop( @@ -1018,8 +1015,6 @@ __floordiv__ = interpindirect2app(W_AbstractIntObject.descr_floordiv), __rfloordiv__ = interpindirect2app(W_AbstractIntObject.descr_rfloordiv), - __div__ = interpindirect2app(W_AbstractIntObject.descr_div), - __rdiv__ = interpindirect2app(W_AbstractIntObject.descr_rdiv), __truediv__ = interpindirect2app(W_AbstractIntObject.descr_truediv), __rtruediv__ = interpindirect2app(W_AbstractIntObject.descr_rtruediv), __mod__ = interpindirect2app(W_AbstractIntObject.descr_mod), diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -304,9 +304,6 @@ return newlong(space, z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) - _div = func_with_new_name(_floordiv, '_div') - descr_div, descr_rdiv = _make_descr_binop(_div) - def _mod(self, space, w_other): try: z = self.num.mod(w_other.asbigint()) diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -266,9 +266,6 @@ return W_SmallLongObject(z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) - _div = func_with_new_name(_floordiv, '_div') - descr_div, descr_rdiv = _make_descr_binop(_div) - def _mod(self, space, w_other): x = self.longlong y = w_other.longlong diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -111,22 +111,6 @@ assert space.isinstance_w(v, space.w_int) assert space.bigint_w(v).eq(rbigint.fromlong(x * y)) - def test_div(self): - space = self.space - for i in range(10): - res = i//3 - f1 = iobj.W_IntObject(i) - f2 = iobj.W_IntObject(3) - result = f1.descr_div(space, f2) - assert result.intval == res - x = -sys.maxint-1 - y = -1 - f1 = iobj.W_IntObject(x) - f2 = iobj.W_IntObject(y) - v = f1.descr_div(space, f2) - assert space.isinstance_w(v, space.w_int) - assert space.bigint_w(v).eq(rbigint.fromlong(x / y)) - def test_mod(self): x = 1 y = 2 From noreply at buildbot.pypy.org Thu Mar 13 08:00:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 08:00:19 +0100 (CET) Subject: [pypy-commit] pypy default: Print the content of the "sections" too, which is stdout/stderr. Message-ID: <20140313070019.5D28A1C14E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69924:768f274ab83b Date: 2014-03-13 07:59 +0100 http://bitbucket.org/pypy/pypy/changeset/768f274ab83b/ Log: Print the content of the "sections" too, which is stdout/stderr. 
diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -51,16 +51,22 @@ self.config = config self.logfile = logfile # preferably line buffered - def write_log_entry(self, testpath, lettercode, longrepr): + def write_log_entry(self, testpath, lettercode, longrepr, sections=[]): py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) + for key, text in sections: + py.builtin.print_(" ", file=self.logfile) + py.builtin.print_(" -------------------- %s --------------------" + % key.rstrip(), file=self.logfile) + py.builtin.print_(" %s" % (text.rstrip().replace('\n', '\n '),), + file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) if testpath is None: testpath = report.fspath - self.write_log_entry(testpath, lettercode, longrepr) + self.write_log_entry(testpath, lettercode, longrepr, report.sections) def pytest_runtest_logreport(self, report): if report.when != "call" and report.passed: From noreply at buildbot.pypy.org Thu Mar 13 08:12:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 08:12:11 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Test and fix for GC roots stored in non-GC prebuilt objects Message-ID: <20140313071211.C78C61C08C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69925:d944127917e1 Date: 2014-03-13 08:11 +0100 http://bitbucket.org/pypy/pypy/changeset/d944127917e1/ Log: Test and fix for GC roots stored in non-GC prebuilt objects diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -18,6 +18,7 @@ needs_write_barrier = False malloc_zero_filled = False prebuilt_gc_objects_are_static_roots = True + ignore_immutable_static_roots = True object_minimal_size = 0 gcflag_extra = 0 # or a real GC flag that is always 0 when not collecting diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -23,6 +23,7 @@ inline_simple_malloc_varsize = True needs_write_barrier = "stm" prebuilt_gc_objects_are_static_roots = False + ignore_immutable_static_roots = False malloc_zero_filled = True object_minimal_size = 16 #gcflag_extra = GCFLAG_EXTRA diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -451,7 +451,8 @@ return else: appendto = self.addresses_of_static_ptrs_in_nongc - for a in gc_pointers_inside(value, adr, mutable_only=True): + mutable_only = gc.ignore_immutable_static_roots + for a in gc_pointers_inside(value, adr, mutable_only=mutable_only): appendto.append(a) # ____________________________________________________________ diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -882,6 +882,14 @@ *pp = stm_setup_prebuilt(*pp); stm_set_prebuilt_identityhash(*pp, *ph); } + + object_t ***cur = (object_t ***) + pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_static_root_start; + object_t ***end = (object_t ***) + pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_static_root_nongcend; + for ( ; cur != end; cur++) { + **cur = stm_setup_prebuilt(**cur); + } } ''' diff --git a/rpython/translator/stm/test/test_ztranslated.py 
b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -433,3 +433,23 @@ assert ': -inf\n' in data data = cbuilder.cmdexec('2') assert ': nan\n' in data + + def test_static_root_in_nongc(self): + class A: + def __init__(self, n): + self.n = n + class B: + def _freeze_(self): + return True + b1 = B(); b1.a = A(42) + b2 = B(); b2.a = A(84) + def dump(b): + print '<', b.a.n, '>' + def main(argv): + dump(b1) + dump(b2) + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert '< 42 >\n< 84 >\n' in data From noreply at buildbot.pypy.org Thu Mar 13 09:27:22 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 13 Mar 2014 09:27:22 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: some cleanup Message-ID: <20140313082722.621571C08C1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r991:0679cfa51e66 Date: 2014-03-13 09:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/0679cfa51e66/ Log: some cleanup diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -8,13 +8,31 @@ OPT_ASSERT(size_rounded_up > sizeof(struct object_s)); object_t *obj = stm_allocate(size_rounded_up); - assert(_is_in_nursery(obj)); /* see assert(0) which depends on it */ - LIST_APPEND(STM_PSEGMENT->young_weakrefs, obj); return obj; } +void _set_weakref_in_all_segments(object_t *weakref, object_t *value) +{ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, weakref); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + + stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); + if (flag_page_private[(uintptr_t)point_to_loc / 4096UL] == PRIVATE_PAGE) { + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + char *base = get_segment_base(i); /* two different segments */ + + object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); + *ref_loc = value; + } + } + else { + *WEAKREF_PTR(weakref, size) = value; + } +} + /***** Minor collection *****/ void stm_move_young_weakrefs() @@ -40,16 +58,13 @@ item = pforwarded_array[1]; /* moved location */ } else { - /* tell me if we need this (requires synchronizing in case - of private pages) */ - assert(0); - /* /\* young outside nursery object *\/ */ - /* if (tree_contains(STM_PSEGMENT->young_outside_nursery, */ - /* (uintptr_t)item)) { */ - /* /\* still in the tree -> wasn't seen by the minor collection, */ - /* so it doesn't survive *\/ */ - /* continue; */ - /* } */ + /* young outside nursery object */ + if (tree_contains(STM_PSEGMENT->young_outside_nursery, + (uintptr_t)item)) { + /* still in the tree -> wasn't seen by the minor collection, + so it doesn't survive */ + continue; + } } assert(!_is_young(item)); @@ -64,14 +79,12 @@ if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) || (pforwarded_array[0] != GCWORD_MOVED)) { /* pointing_to dies */ - *WEAKREF_PTR(item, size) = NULL; - synchronize_overflow_object_now(item); + _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ } else { /* moved location */ - *WEAKREF_PTR(item, size) = pforwarded_array[1]; - synchronize_overflow_object_now(item); + _set_weakref_in_all_segments(item, pforwarded_array[1]); } } else { @@ -80,8 +93,7 @@ (uintptr_t)pointing_to)) { /* still in the tree -> wasn't seen by the minor collection, so it doesn't survive */ - *WEAKREF_PTR(item, size) = NULL; - synchronize_overflow_object_now(item); + 
_set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ } /* pointing_to was already old */ @@ -118,10 +130,7 @@ assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); - *WEAKREF_PTR(weakref, size) = NULL; - if (flag_page_private[(uintptr_t)weakref / 4096UL] == PRIVATE_PAGE) { - synchronize_overflow_object_now(weakref); - } + _set_weakref_in_all_segments(weakref, NULL); /* we don't need it in this list anymore */ list_set_item(lst, n, list_pop_item(lst)); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -286,8 +286,13 @@ lib._set_type_id(o, tid) return o -def stm_allocate_weakref(point_to_obj): - o = lib.stm_allocate_weakref(HDR + WORD) +def stm_allocate_weakref(point_to_obj, size=None): + if size is None: + o = lib.stm_allocate_weakref(HDR + WORD) + else: + assert size >= HDR + WORD + o = lib.stm_allocate_weakref(size) + tid = 421420 lib._set_type_id(o, tid) lib._set_weakref(o, point_to_obj) diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -143,6 +143,37 @@ self.start_transaction() assert stm_get_weakref(lp1) == ffi.NULL + def test_multiple_threads_w_big_weakref(self): + self.start_transaction() + lp0 = stm_allocate(1024) + self.push_root(lp0) + self.commit_transaction() + + self.start_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + stm_write(lp0) # privatize page + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref( + lp2, size=lib._STM_FAST_ALLOC + 16) # no collection here + self.pop_root() + + self.push_root(lp0) + self.push_root(lp1) + self.commit_transaction() + # lp2 dies + lp1 = self.pop_root() + self.push_root(lp1) + + assert stm_get_weakref(lp1) == ffi.NULL + + self.switch(1) + + self.start_transaction() + assert stm_get_weakref(lp1) == ffi.NULL + From noreply at buildbot.pypy.org Thu Mar 13 09:47:18 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 13 Mar 2014 09:47:18 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: better documentation Message-ID: <20140313084718.BACEE1C14B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r992:0eaf9d6d1535 Date: 2014-03-13 09:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/0eaf9d6d1535/ Log: better documentation diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -197,9 +197,10 @@ /* Allocate a weakref object. Weakref objects have a reference to an object at the byte-offset stmcb_size_rounded_up(obj) - sizeof(void*) - This reference becomes NULL if the referenced object was freed. You must assign the reference before the next collection may happen. - After that, they may be considered immutable. */ + After that, you must not mutate the reference anymore. However, + it can become NULL after any GC if the reference dies during that + collection. 
*/ object_t *stm_allocate_weakref(ssize_t size_rounded_up); From noreply at buildbot.pypy.org Thu Mar 13 09:52:24 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 13 Mar 2014 09:52:24 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: fix use of STM_SEGMENT during major collections Message-ID: <20140313085224.01F5C1C14C0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r993:1541f27d7248 Date: 2014-03-13 09:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/1541f27d7248/ Log: fix use of STM_SEGMENT during major collections diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -13,9 +13,9 @@ } -void _set_weakref_in_all_segments(object_t *weakref, object_t *value) +void _set_weakref_in_all_segments(char* base, object_t *weakref, object_t *value) { - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, weakref); + char *realobj = REAL_ADDRESS(base, weakref); ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); @@ -68,7 +68,8 @@ } assert(!_is_young(item)); - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, item); + char *base = STM_SEGMENT->segment_base; + char *realobj = REAL_ADDRESS(base, item); ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); object_t *pointing_to = *WEAKREF_PTR(item, size); assert(pointing_to != NULL); @@ -79,12 +80,12 @@ if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) || (pforwarded_array[0] != GCWORD_MOVED)) { /* pointing_to dies */ - _set_weakref_in_all_segments(item, NULL); + _set_weakref_in_all_segments(base, item, NULL); continue; /* no need to remember in old_weakrefs */ } else { /* moved location */ - _set_weakref_in_all_segments(item, pforwarded_array[1]); + _set_weakref_in_all_segments(base, item, pforwarded_array[1]); } } else { @@ -93,7 +94,7 @@ (uintptr_t)pointing_to)) { /* still in the tree -> wasn't seen by the minor collection, so it doesn't survive */ - _set_weakref_in_all_segments(item, NULL); + _set_weakref_in_all_segments(base, item, NULL); continue; /* no need to remember in old_weakrefs */ } /* pointing_to was already old */ @@ -130,7 +131,7 @@ assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); - _set_weakref_in_all_segments(weakref, NULL); + _set_weakref_in_all_segments(pseg->pub.segment_base, weakref, NULL); /* we don't need it in this list anymore */ list_set_item(lst, n, list_pop_item(lst)); From noreply at buildbot.pypy.org Thu Mar 13 10:10:21 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 13 Mar 2014 10:10:21 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: simplify code by assuming sizeof(weakref) == 16 Message-ID: <20140313091021.D60971D23C6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r994:f52b09faef51 Date: 2014-03-13 10:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/f52b09faef51/ Log: simplify code by assuming sizeof(weakref) == 16 diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -2,21 +2,24 @@ # error "must be compiled via stmgc.c" #endif +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) object_t *stm_allocate_weakref(ssize_t size_rounded_up) { OPT_ASSERT(size_rounded_up > sizeof(struct object_s)); + OPT_ASSERT(size_rounded_up == 16); /* no reason for it to be anything else */ + object_t *obj = 
stm_allocate(size_rounded_up); + assert(_is_in_nursery(obj)); /* because it's so small */ LIST_APPEND(STM_PSEGMENT->young_weakrefs, obj); return obj; } -void _set_weakref_in_all_segments(char* base, object_t *weakref, object_t *value) +void _set_weakref_in_all_segments(object_t *weakref, object_t *value) { - char *realobj = REAL_ADDRESS(base, weakref); - ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + ssize_t size = 16; stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); if (flag_page_private[(uintptr_t)point_to_loc / 4096UL] == PRIVATE_PAGE) { @@ -45,32 +48,23 @@ STM_PSEGMENT->young_weakrefs, object_t * /*item*/, ({ - if (_is_in_nursery(item)) { - object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + /* weakrefs are so small, they always are in the nursery. Never + a young outside nursery object. */ + assert(_is_in_nursery(item)); + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; - /* the following checks are done like in nursery.c: */ - if (!(item->stm_flags & GCFLAG_HAS_SHADOW) - || (pforwarded_array[0] != GCWORD_MOVED)) { - /* weakref dies */ - continue; - } + /* the following checks are done like in nursery.c: */ + if (!(item->stm_flags & GCFLAG_HAS_SHADOW) + || (pforwarded_array[0] != GCWORD_MOVED)) { + /* weakref dies */ + continue; + } - item = pforwarded_array[1]; /* moved location */ - } - else { - /* young outside nursery object */ - if (tree_contains(STM_PSEGMENT->young_outside_nursery, - (uintptr_t)item)) { - /* still in the tree -> wasn't seen by the minor collection, - so it doesn't survive */ - continue; - } - } + item = pforwarded_array[1]; /* moved location */ + assert(!_is_young(item)); - char *base = STM_SEGMENT->segment_base; - char *realobj = REAL_ADDRESS(base, item); - ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + ssize_t size = 16; object_t *pointing_to = *WEAKREF_PTR(item, size); assert(pointing_to != NULL); @@ -80,12 +74,12 @@ if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) || (pforwarded_array[0] != GCWORD_MOVED)) { /* pointing_to dies */ - _set_weakref_in_all_segments(base, item, NULL); + _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ } else { /* moved location */ - _set_weakref_in_all_segments(base, item, pforwarded_array[1]); + _set_weakref_in_all_segments(item, pforwarded_array[1]); } } else { @@ -94,7 +88,7 @@ (uintptr_t)pointing_to)) { /* still in the tree -> wasn't seen by the minor collection, so it doesn't survive */ - _set_weakref_in_all_segments(base, item, NULL); + _set_weakref_in_all_segments(item, NULL); continue; /* no need to remember in old_weakrefs */ } /* pointing_to was already old */ @@ -125,13 +119,12 @@ continue; } - char *realobj = REAL_ADDRESS(pseg->pub.segment_base, weakref); - ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + ssize_t size = 16; object_t *pointing_to = *WEAKREF_PTR(weakref, size); assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); - _set_weakref_in_all_segments(pseg->pub.segment_base, weakref, NULL); + _set_weakref_in_all_segments(weakref, NULL); /* we don't need it in this list anymore */ list_set_item(lst, n, list_pop_item(lst)); diff --git a/c7/stm/weakref.h b/c7/stm/weakref.h --- a/c7/stm/weakref.h +++ b/c7/stm/weakref.h @@ -2,8 +2,6 @@ #define _SRCSTM_WEAKREF_H -#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) - void 
stm_move_young_weakrefs(void); void stm_visit_old_weakrefs(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -200,7 +200,9 @@ You must assign the reference before the next collection may happen. After that, you must not mutate the reference anymore. However, it can become NULL after any GC if the reference dies during that - collection. */ + collection. + NOTE: For performance, we assume stmcb_size_rounded_up(weakref)==16 +*/ object_t *stm_allocate_weakref(ssize_t size_rounded_up); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -287,11 +287,8 @@ return o def stm_allocate_weakref(point_to_obj, size=None): - if size is None: - o = lib.stm_allocate_weakref(HDR + WORD) - else: - assert size >= HDR + WORD - o = lib.stm_allocate_weakref(size) + assert HDR+WORD == 16 + o = lib.stm_allocate_weakref(HDR + WORD) tid = 421420 lib._set_type_id(o, tid) diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -143,37 +143,6 @@ self.start_transaction() assert stm_get_weakref(lp1) == ffi.NULL - def test_multiple_threads_w_big_weakref(self): - self.start_transaction() - lp0 = stm_allocate(1024) - self.push_root(lp0) - self.commit_transaction() - - self.start_transaction() - lp0 = self.pop_root() - self.push_root(lp0) - stm_write(lp0) # privatize page - - self.push_root_no_gc() - lp2 = stm_allocate(48) - lp1 = stm_allocate_weakref( - lp2, size=lib._STM_FAST_ALLOC + 16) # no collection here - self.pop_root() - - self.push_root(lp0) - self.push_root(lp1) - self.commit_transaction() - # lp2 dies - lp1 = self.pop_root() - self.push_root(lp1) - - assert stm_get_weakref(lp1) == ffi.NULL - - self.switch(1) - - self.start_transaction() - assert stm_get_weakref(lp1) == ffi.NULL - From noreply at buildbot.pypy.org Thu Mar 13 10:16:26 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 13 Mar 2014 10:16:26 +0100 (CET) Subject: [pypy-commit] stmgc c7-weakref: make functions static Message-ID: <20140313091626.54AC11C1DB6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-weakref Changeset: r995:99d119949f95 Date: 2014-03-13 10:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/99d119949f95/ Log: make functions static diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -17,7 +17,7 @@ } -void _set_weakref_in_all_segments(object_t *weakref, object_t *value) +static void _set_weakref_in_all_segments(object_t *weakref, object_t *value) { ssize_t size = 16; @@ -38,7 +38,7 @@ /***** Minor collection *****/ -void stm_move_young_weakrefs() +static void stm_move_young_weakrefs() { /* The code relies on the fact that no weakref can be an old object weakly pointing to a young object. 
Indeed, weakrefs are immutable @@ -102,7 +102,7 @@ /***** Major collection *****/ -void stm_visit_old_weakrefs(void) +static void stm_visit_old_weakrefs(void) { long i; for (i = 0; i < NB_SEGMENTS; i++) { diff --git a/c7/stm/weakref.h b/c7/stm/weakref.h --- a/c7/stm/weakref.h +++ b/c7/stm/weakref.h @@ -1,9 +1,9 @@ #ifndef _SRCSTM_WEAKREF_H #define _SRCSTM_WEAKREF_H - -void stm_move_young_weakrefs(void); -void stm_visit_old_weakrefs(void); +object_t *stm_allocate_weakref(ssize_t size_rounded_up); +static void stm_move_young_weakrefs(void); +static void stm_visit_old_weakrefs(void); #endif From noreply at buildbot.pypy.org Thu Mar 13 10:30:25 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 13 Mar 2014 10:30:25 +0100 (CET) Subject: [pypy-commit] stmgc default: merge weakref support Message-ID: <20140313093025.D803C1D23DB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r996:ae62acdb5d7c Date: 2014-03-13 10:31 +0100 http://bitbucket.org/pypy/stmgc/changeset/ae62acdb5d7c/ Log: merge weakref support diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -180,6 +180,7 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); @@ -483,6 +484,7 @@ /* reset these lists to NULL too on abort */ LIST_FREE(pseg->objects_pointing_to_nursery); LIST_FREE(pseg->large_overflow_objects); + list_clear(pseg->young_weakrefs); } static void abort_with_mutex(void) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -61,7 +61,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. */ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x08 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x8 /* must be last */ }; @@ -105,6 +105,15 @@ next minor collection. */ struct tree_s *nursery_objects_shadows; + /* List of all young weakrefs to check in minor collections. These + are the only weakrefs that may point to young objects and never + contain NULL. */ + struct list_s *young_weakrefs; + + /* List of all old weakrefs to check in major collections. These + weakrefs never point to young objects and never contain NULL. */ + struct list_s *old_weakrefs; + /* Tree of 'key->callback' associations from stm_call_on_abort() */ struct tree_s *callbacks_on_abort; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -450,7 +450,11 @@ /* 'objects_pointing_to_nursery' should be empty, but isn't necessarily because it also lists objects that have been written to but don't actually point to the nursery. Clear - it up and set GCFLAG_WRITE_BARRIER again on the objects. */ + it up and set GCFLAG_WRITE_BARRIER again on the objects. 
+ This is the case for transactions where + MINOR_NOTHING_TO_DO() == false + but they still did write-barriers on objects + */ lst = pseg->objects_pointing_to_nursery; if (lst != NULL) { LIST_FOREACH_R(lst, uintptr_t /*item*/, @@ -537,6 +541,9 @@ mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); + /* weakrefs: */ + stm_visit_old_weakrefs(); + /* cleanup */ clean_up_segment_lists(); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -299,6 +299,9 @@ collect_oldrefs_to_nursery(); + /* now all surviving nursery objects have been moved out */ + stm_move_young_weakrefs(); + throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num)); assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -57,6 +57,8 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->young_weakrefs = list_create(); + pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_abort = tree_create(); @@ -95,6 +97,8 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->young_weakrefs); + list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); tree_free(pr->callbacks_on_abort); diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c new file mode 100644 --- /dev/null +++ b/c7/stm/weakref.c @@ -0,0 +1,138 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) + +object_t *stm_allocate_weakref(ssize_t size_rounded_up) +{ + OPT_ASSERT(size_rounded_up > sizeof(struct object_s)); + OPT_ASSERT(size_rounded_up == 16); /* no reason for it to be anything else */ + + object_t *obj = stm_allocate(size_rounded_up); + assert(_is_in_nursery(obj)); /* because it's so small */ + + LIST_APPEND(STM_PSEGMENT->young_weakrefs, obj); + return obj; +} + + +static void _set_weakref_in_all_segments(object_t *weakref, object_t *value) +{ + ssize_t size = 16; + + stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); + if (flag_page_private[(uintptr_t)point_to_loc / 4096UL] == PRIVATE_PAGE) { + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + char *base = get_segment_base(i); /* two different segments */ + + object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); + *ref_loc = value; + } + } + else { + *WEAKREF_PTR(weakref, size) = value; + } +} + +/***** Minor collection *****/ + +static void stm_move_young_weakrefs() +{ + /* The code relies on the fact that no weakref can be an old object + weakly pointing to a young object. Indeed, weakrefs are immutable + so they cannot point to an object that was created after it. + */ + LIST_FOREACH_R( + STM_PSEGMENT->young_weakrefs, + object_t * /*item*/, + ({ + /* weakrefs are so small, they always are in the nursery. Never + a young outside nursery object. 
*/ + assert(_is_in_nursery(item)); + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + + /* the following checks are done like in nursery.c: */ + if (!(item->stm_flags & GCFLAG_HAS_SHADOW) + || (pforwarded_array[0] != GCWORD_MOVED)) { + /* weakref dies */ + continue; + } + + item = pforwarded_array[1]; /* moved location */ + + assert(!_is_young(item)); + + ssize_t size = 16; + object_t *pointing_to = *WEAKREF_PTR(item, size); + assert(pointing_to != NULL); + + if (_is_in_nursery(pointing_to)) { + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; + /* the following checks are done like in nursery.c: */ + if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) + || (pforwarded_array[0] != GCWORD_MOVED)) { + /* pointing_to dies */ + _set_weakref_in_all_segments(item, NULL); + continue; /* no need to remember in old_weakrefs */ + } + else { + /* moved location */ + _set_weakref_in_all_segments(item, pforwarded_array[1]); + } + } + else { + /* young outside nursery object or old object */ + if (tree_contains(STM_PSEGMENT->young_outside_nursery, + (uintptr_t)pointing_to)) { + /* still in the tree -> wasn't seen by the minor collection, + so it doesn't survive */ + _set_weakref_in_all_segments(item, NULL); + continue; /* no need to remember in old_weakrefs */ + } + /* pointing_to was already old */ + } + LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); + })); + list_clear(STM_PSEGMENT->young_weakrefs); +} + + +/***** Major collection *****/ + + +static void stm_visit_old_weakrefs(void) +{ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst; + + lst = pseg->old_weakrefs; + uintptr_t n = list_count(lst); + while (n > 0) { + object_t *weakref = (object_t *)list_item(lst, --n); + if (!mark_visited_test(weakref)) { + /* weakref dies */ + list_set_item(lst, n, list_pop_item(lst)); + continue; + } + + ssize_t size = 16; + object_t *pointing_to = *WEAKREF_PTR(weakref, size); + assert(pointing_to != NULL); + if (!mark_visited_test(pointing_to)) { + //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); + _set_weakref_in_all_segments(weakref, NULL); + + /* we don't need it in this list anymore */ + list_set_item(lst, n, list_pop_item(lst)); + continue; + } + else { + /* it survives! */ + } + } + } +} diff --git a/c7/stm/weakref.h b/c7/stm/weakref.h new file mode 100644 --- /dev/null +++ b/c7/stm/weakref.h @@ -0,0 +1,9 @@ +#ifndef _SRCSTM_WEAKREF_H +#define _SRCSTM_WEAKREF_H + +object_t *stm_allocate_weakref(ssize_t size_rounded_up); +static void stm_move_young_weakrefs(void); +static void stm_visit_old_weakrefs(void); + + +#endif diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -12,6 +12,7 @@ #include "stm/contention.h" #include "stm/extra.h" #include "stm/fprintcolor.h" +#include "stm/weakref.h" #include "stm/misc.c" #include "stm/list.c" @@ -28,3 +29,4 @@ #include "stm/contention.c" #include "stm/extra.c" #include "stm/fprintcolor.c" +#include "stm/weakref.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -194,6 +194,17 @@ return (object_t *)p; } +/* Allocate a weakref object. Weakref objects have a + reference to an object at the byte-offset + stmcb_size_rounded_up(obj) - sizeof(void*) + You must assign the reference before the next collection may happen. + After that, you must not mutate the reference anymore. However, + it can become NULL after any GC if the reference dies during that + collection. 
+ NOTE: For performance, we assume stmcb_size_rounded_up(weakref)==16 +*/ +object_t *stm_allocate_weakref(ssize_t size_rounded_up); + /* stm_setup() needs to be called once at the beginning of the program. stm_teardown() can be called at the end, but that's not necessary diff --git a/c7/test/common.py b/c7/test/common.py --- a/c7/test/common.py +++ b/c7/test/common.py @@ -14,7 +14,7 @@ os.path.join(parent_dir, "stmgc.c")] + [ os.path.join(parent_dir, 'stm', _n) for _n in os.listdir(os.path.join(parent_dir, 'stm')) - if _n.endswith('.h') or _n.endswith('.c')] + if (_n.endswith('.h') or _n.endswith('.c')) and not _n.startswith('.')] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -25,6 +25,7 @@ void stm_read(object_t *obj); /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); +object_t *stm_allocate_weakref(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); void stm_setup(void); @@ -54,6 +55,10 @@ void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); +void _set_weakref(object_t *obj, object_t *v); +object_t* _get_weakref(object_t *obj); + + void _stm_start_safe_point(void); bool _check_stop_safe_point(void); @@ -163,6 +168,21 @@ } +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) +void _set_weakref(object_t *obj, object_t *v) +{ + char *realobj = _stm_real_address(obj); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + *WEAKREF_PTR(obj, size) = v; +} + +object_t * _get_weakref(object_t *obj) +{ + char *realobj = _stm_real_address(obj); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + return *WEAKREF_PTR(obj, size); +} + void _set_ptr(object_t *obj, int n, object_t *v) { long nrefs = (long)((myobj_t*)obj)->type_id - 421420; @@ -266,6 +286,18 @@ lib._set_type_id(o, tid) return o +def stm_allocate_weakref(point_to_obj, size=None): + assert HDR+WORD == 16 + o = lib.stm_allocate_weakref(HDR + WORD) + + tid = 421420 + lib._set_type_id(o, tid) + lib._set_weakref(o, point_to_obj) + return o + +def stm_get_weakref(o): + return lib._get_weakref(o) + def stm_allocate_refs(n): o = lib.stm_allocate(HDR + n * WORD) tid = 421420 + n diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py new file mode 100644 --- /dev/null +++ b/c7/test/test_weakref.py @@ -0,0 +1,255 @@ +import py +from support import * + + + + +class TestMinorCollection(BaseTest): + def test_simple(self): + lib._stm_set_nursery_free_count(2048) + self.start_transaction() + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL + + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL + + def test_still_simple(self): + lib._stm_set_nursery_free_count(2048) + self.start_transaction() + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + 
assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 + + def test_weakref_itself_dies(self): + self.start_transaction() + + self.push_root_no_gc() + lp2 = stm_allocate(48) + stm_allocate_weakref(lp2) # no collection here + self.pop_root() + stm_minor_collect() + assert lib._stm_total_allocated() == 0 + + + def test_weakref_old_keep(self): + lp0 = stm_allocate_old(48) + + self.start_transaction() + self.push_root_no_gc() + lp1 = stm_allocate_weakref(lp0) # no collection here + self.pop_root() + + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + + assert stm_get_weakref(lp1) == lp0 + + + def test_abort_cleanup(self): + self.start_transaction() + + self.push_root_no_gc() + lp1 = stm_allocate_weakref(ffi.NULL) # no collection here + self.pop_root() + + self.abort_transaction() + self.start_transaction() + + def test_big_alloc_sizes(self): + sizes = [lib._STM_FAST_ALLOC + 16, 48,] + + for osize in sizes: + self.start_transaction() + self.push_root_no_gc() + lp2 = stm_allocate(osize) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 + self.abort_transaction() + + + def test_multiple_threads(self): + self.start_transaction() + lp0 = stm_allocate(1024) + self.push_root(lp0) + self.commit_transaction() + + self.start_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + stm_write(lp0) # privatize page + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + self.push_root(lp0) + self.push_root(lp1) + self.commit_transaction() + # lp2 dies + lp1 = self.pop_root() + self.push_root(lp1) + + assert stm_get_weakref(lp1) == ffi.NULL + + self.switch(1) + + self.start_transaction() + assert stm_get_weakref(lp1) == ffi.NULL + + + + +class TestMajorCollection(BaseTest): + def test_simple(self): + self.start_transaction() + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_minor_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + # lp2 survived because no major collection + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + stm_major_collect() + lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL + + def test_weakref_old_keep(self): + lp0 = stm_allocate_old(48) + + self.start_transaction() + self.push_root_no_gc() + lp1 = stm_allocate_weakref(lp0) # no collection here + self.pop_root() + + self.push_root(lp1) + stm_major_collect() + lp1 = self.pop_root() + + assert stm_get_weakref(lp1) == lp0 + + def test_survive(self): + self.start_transaction() + + self.push_root_no_gc() + lp2 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp2) # no collection here + self.pop_root() + + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + self.push_root(lp2) + stm_major_collect() + lp2 = self.pop_root() + lp1 = self.pop_root() + # lp2 survived + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + 
stm_minor_collect() + lp1 = self.pop_root() + # lp2 survived because no major collection + assert stm_get_weakref(lp1) == lp2 + + self.push_root(lp1) + stm_major_collect() + lp1 = self.pop_root() + # lp2 died + assert stm_get_weakref(lp1) == ffi.NULL + + def test_multiple_threads(self): + self.start_transaction() + lp0 = stm_allocate(48) + lp1 = stm_allocate_weakref(lp0) # no collection here + self.push_root(lp1) + self.push_root(lp0) + self.commit_transaction() + + self.start_transaction() + lp0 = self.pop_root() + lp1 = self.pop_root() + self.push_root(lp1) + + stm_write(lp0) # privatize page with weakref in it too + + assert stm_get_page_flag(stm_get_obj_pages(lp1)[0]) == PRIVATE_PAGE + assert stm_get_weakref(lp1) == lp0 + + self.commit_transaction() + self.start_transaction() + + # lp0 dies + stm_major_collect() + + assert stm_get_weakref(lp1) == ffi.NULL + print stm_get_real_address(lp1) + + self.switch(1) + + self.start_transaction() + assert stm_get_weakref(lp1) == ffi.NULL + print stm_get_real_address(lp1) From noreply at buildbot.pypy.org Thu Mar 13 10:35:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 10:35:41 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Remove an XXX that was already fixed Message-ID: <20140313093541.040271D23DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69926:470cf84b52d7 Date: 2014-03-13 10:34 +0100 http://bitbucket.org/pypy/pypy/changeset/470cf84b52d7/ Log: Remove an XXX that was already fixed diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -73,7 +73,6 @@ return True def transform_generic_set(self, hop): - # XXX detect if we're inside a 'stm_ignored' block and... do what? assert self.write_barrier_ptr == "stm" opname = hop.spaceop.opname v_struct = hop.spaceop.args[0] @@ -82,6 +81,9 @@ if (v_struct.concretetype.TO._gckind == "gc" and hop.spaceop not in self.clean_sets): if self.in_stm_ignored: + # detect if we're inside a 'stm_ignored' block and in + # that case don't call stm_write(). This only works for + # writing non-GC pointers. if var_needsgc(hop.spaceop.args[-1]): raise Exception("in stm_ignored block: write of a gc " "pointer") From noreply at buildbot.pypy.org Thu Mar 13 11:42:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 11:42:21 +0100 (CET) Subject: [pypy-commit] stmgc default: Prebuilt weakrefs, needed for pypy. Message-ID: <20140313104221.024931C14B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r997:c4e8d6220b74 Date: 2014-03-13 11:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/c4e8d6220b74/ Log: Prebuilt weakrefs, needed for pypy. diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -17,6 +17,16 @@ } +object_t *stm_setup_prebuilt_weakref(object_t *obj) +{ + ssize_t size = 16; + + obj = stm_setup_prebuilt(obj); + *WEAKREF_PTR(obj, size) = stm_setup_prebuilt(*WEAKREF_PTR(obj, size)); + return obj; +} + + static void _set_weakref_in_all_segments(object_t *weakref, object_t *value) { ssize_t size = 16; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -282,6 +282,9 @@ static structure, but it should never be used anyway.) */ object_t *stm_setup_prebuilt(object_t *); +/* The same, if the prebuilt object is actually a weakref. */ +object_t *stm_setup_prebuilt_weakref(object_t *); + /* Hash, id. 
The id is just the address of the object (of the address where it *will* be after the next minor collection). The hash is the same, mangled -- except on prebuilt objects, where it can be diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -33,6 +33,7 @@ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); object_t *stm_setup_prebuilt(object_t *); +object_t *stm_setup_prebuilt_weakref(object_t *); bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -253,3 +253,21 @@ self.start_transaction() assert stm_get_weakref(lp1) == ffi.NULL print stm_get_real_address(lp1) + + def test_prebuit_weakref(self): + from test_prebuilt import prebuilt + static1 = prebuilt(16) # a prebuit dead weakref + lp1 = lib.stm_setup_prebuilt_weakref(static1) + static2 = prebuilt(16) # some random prebuilt object + ffi.cast("char *", static2)[8:11] = 'ABC' + lp2 = lib.stm_setup_prebuilt(static2) + static3 = prebuilt(16) # a prebuilt weakref to static2 + ffi.cast("object_t **", static3)[1] = static2 + lp3 = lib.stm_setup_prebuilt_weakref(static3) + # + self.start_transaction() + assert stm_get_char(lp2, 8) == 'A' + assert stm_get_char(lp2, 9) == 'B' + assert stm_get_char(lp2, 10) == 'C' + assert stm_get_weakref(lp1) == ffi.NULL + assert stm_get_weakref(lp3) == lp2 From noreply at buildbot.pypy.org Thu Mar 13 13:26:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 13:26:47 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/ae62acdb5d7c Message-ID: <20140313122647.57A601C08C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69927:d9489fac1c0e Date: 2014-03-13 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/d9489fac1c0e/ Log: import stmgc/ae62acdb5d7c diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -c6ed145863b4 +ae62acdb5d7c diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -181,6 +181,7 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); @@ -484,6 +485,7 @@ /* reset these lists to NULL too on abort */ LIST_FREE(pseg->objects_pointing_to_nursery); LIST_FREE(pseg->large_overflow_objects); + list_clear(pseg->young_weakrefs); } static void abort_with_mutex(void) diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -62,7 +62,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. */ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x08 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x8 /* must be last */ }; @@ -106,6 +106,15 @@ next minor collection. 
*/ struct tree_s *nursery_objects_shadows; + /* List of all young weakrefs to check in minor collections. These + are the only weakrefs that may point to young objects and never + contain NULL. */ + struct list_s *young_weakrefs; + + /* List of all old weakrefs to check in major collections. These + weakrefs never point to young objects and never contain NULL. */ + struct list_s *old_weakrefs; + /* Tree of 'key->callback' associations from stm_call_on_abort() */ struct tree_s *callbacks_on_abort; diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -451,7 +451,11 @@ /* 'objects_pointing_to_nursery' should be empty, but isn't necessarily because it also lists objects that have been written to but don't actually point to the nursery. Clear - it up and set GCFLAG_WRITE_BARRIER again on the objects. */ + it up and set GCFLAG_WRITE_BARRIER again on the objects. + This is the case for transactions where + MINOR_NOTHING_TO_DO() == false + but they still did write-barriers on objects + */ lst = pseg->objects_pointing_to_nursery; if (lst != NULL) { LIST_FOREACH_R(lst, uintptr_t /*item*/, @@ -538,6 +542,9 @@ mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); + /* weakrefs: */ + stm_visit_old_weakrefs(); + /* cleanup */ clean_up_segment_lists(); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -300,6 +300,9 @@ collect_oldrefs_to_nursery(); + /* now all surviving nursery objects have been moved out */ + stm_move_young_weakrefs(); + throw_away_nursery(get_priv_segment(STM_SEGMENT->segment_num)); assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -58,6 +58,8 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->young_weakrefs = list_create(); + pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_abort = tree_create(); @@ -96,6 +98,8 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->young_weakrefs); + list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); tree_free(pr->callbacks_on_abort); diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -13,6 +13,7 @@ #include "stm/contention.h" #include "stm/extra.h" #include "stm/fprintcolor.h" +#include "stm/weakref.h" #include "stm/misc.c" #include "stm/list.c" @@ -29,3 +30,4 @@ #include "stm/contention.c" #include "stm/extra.c" #include "stm/fprintcolor.c" +#include "stm/weakref.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -195,6 +195,17 @@ return (object_t *)p; } +/* Allocate a weakref object. 
Weakref objects have a + reference to an object at the byte-offset + stmcb_size_rounded_up(obj) - sizeof(void*) + You must assign the reference before the next collection may happen. + After that, you must not mutate the reference anymore. However, + it can become NULL after any GC if the reference dies during that + collection. + NOTE: For performance, we assume stmcb_size_rounded_up(weakref)==16 +*/ +object_t *stm_allocate_weakref(ssize_t size_rounded_up); + /* stm_setup() needs to be called once at the beginning of the program. stm_teardown() can be called at the end, but that's not necessary From noreply at buildbot.pypy.org Thu Mar 13 13:26:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 13:26:48 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Support weakrefs. Missing prebuilt weakrefs so far. Message-ID: <20140313122648.BA1A71C08C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69928:b341c6bd5059 Date: 2014-03-13 11:42 +0100 http://bitbucket.org/pypy/pypy/changeset/b341c6bd5059/ Log: Support weakrefs. Missing prebuilt weakrefs so far. diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -65,9 +65,10 @@ # XXX finalizers are ignored for now #ll_assert(not needs_finalizer, 'XXX needs_finalizer') #ll_assert(not is_finalizer_light, 'XXX is_finalizer_light') - ll_assert(not contains_weakptr, 'contains_weakptr: use malloc_weakref') if size < 16: size = 16 # minimum size (test usually constant-folded) + if contains_weakptr: # check constant-folded + return llop.stm_allocate_weakref(llmemory.GCREF, size, typeid16) return llop.stm_allocate_tid(llmemory.GCREF, size, typeid16) def malloc_varsize_clear(self, typeid16, length, size, itemsize, @@ -79,11 +80,6 @@ llop.stm_set_into_obj(lltype.Void, result, offset_to_length, length) return result - def malloc_weakref(self, typeid16, size, obj): - raise NotImplementedError # XXX - return llop.stm_weakref_allocate(llmemory.GCREF, size, - typeid16, obj) - def can_optimize_clean_setarrayitems(self): return False diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -945,7 +945,15 @@ # cast_ptr_to_adr must be done after malloc, as the GC pointer # might have moved just now. v_instance, = op.args - v_addr = hop.genop("cast_ptr_to_adr", [v_instance], + if self.translator.config.translation.stm: + # not untranslated-test-friendly, but good enough: we hide + # our GC object inside an Address field. It's not a correct + # "void *" address, as it's in the wrong address_space, but + # we will force_cast it again in weakref_deref(). 
+ opname = "force_cast" + else: + opname = "cast_ptr_to_adr" + v_addr = hop.genop(opname, [v_instance], resulttype=llmemory.Address) hop.genop("bare_setfield", [v_result, rmodel.inputconst(lltype.Void, "weakptr"), v_addr]) @@ -958,7 +966,12 @@ v_addr = hop.genop("direct_call", [self.weakref_deref_ptr, v_wref], resulttype=llmemory.Address) - hop.cast_result(v_addr) + if self.translator.config.translation.stm: + # see gct_weakref_create() + hop.genop("force_cast", [v_addr], + resultvar=hop.spaceop.result) + else: + hop.cast_result(v_addr) def gct_gc_identityhash(self, hop): livevars = self.push_roots(hop) diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -15,13 +15,6 @@ s_gc, s_typeid16) gc = self.gcdata.gc # - s_gcref = llannotation.SomePtr(llmemory.GCREF) - - self.malloc_weakref_ptr = self._getfn( - GCClass.malloc_weakref.im_func, - [s_gc, s_typeid16, annmodel.SomeInteger(nonneg=True), - s_gcref], s_gcref) - # def pypy_stmcb_size_rounded_up(obj): return gc.get_size(obj) pypy_stmcb_size_rounded_up.c_name = "pypy_stmcb_size_rounded_up" @@ -98,40 +91,6 @@ def gct_gc_adr_of_root_stack_top(self, hop): hop.genop("stm_get_root_stack_top", [], resultvar=hop.spaceop.result) - def gct_weakref_create(self, hop): - XXX - op = hop.spaceop - - type_id = self.get_type_id(WEAKREF) - - c_type_id = rmodel.inputconst(TYPE_ID, type_id) - info = self.layoutbuilder.get_info(type_id) - c_size = rmodel.inputconst(lltype.Signed, info.fixedsize) - malloc_ptr = self.malloc_weakref_ptr - c_null = rmodel.inputconst(llmemory.Address, llmemory.NULL) - args = [self.c_const_gc, c_type_id, c_size, c_null] - # XXX: for now, set weakptr ourselves and simply pass NULL - - # push and pop the current live variables *including* the argument - # to the weakref_create operation, which must be kept alive and - # moved if the GC needs to collect - livevars = self.push_roots(hop, keep_current_args=True) - v_result = hop.genop("direct_call", [malloc_ptr] + args, - resulttype=llmemory.GCREF) - v_result = hop.genop("cast_opaque_ptr", [v_result], - resulttype=WEAKREFPTR) - self.pop_roots(hop, livevars) - # cast_ptr_to_adr must be done after malloc, as the GC pointer - # might have moved just now. 
- v_instance, = op.args - v_addr = hop.genop("cast_ptr_to_adr", [v_instance], - resulttype=llmemory.Address) - hop.genop("bare_setfield", - [v_result, rmodel.inputconst(lltype.Void, "weakptr"), v_addr]) - v_weakref = hop.genop("cast_ptr_to_weakrefptr", [v_result], - resulttype=llmemory.WeakRefPtr) - hop.cast_result(v_weakref) - ## def _gct_with_roots_pushed(self, hop): ## livevars = self.push_roots(hop) ## self.default(hop) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -417,6 +417,7 @@ 'stm_write': LLOp(), 'stm_can_move': LLOp(), 'stm_allocate_tid': LLOp(sideeffects=False, canmallocgc=True), + 'stm_allocate_weakref': LLOp(sideeffects=False, canmallocgc=True), 'stm_get_from_obj': LLOp(sideeffects=False), 'stm_get_from_obj_const': LLOp(canfold=True), 'stm_set_into_obj': LLOp(), diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -170,7 +170,7 @@ def name_address(value, db): if value: - return db.get(value.ref()) + return '((void *)%s)' % (db.get(value.ref(), static=True),) else: return 'NULL' diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -77,6 +77,14 @@ return ('%s = (rpygcchar_t *)stm_allocate(%s); ' % (result, arg_size) + '((rpyobj_t *)%s)->tid = %s;' % (result, arg_type_id)) +def stm_allocate_weakref(funcgen, op): + arg_size = funcgen.expr(op.args[0]) + arg_type_id = funcgen.expr(op.args[1]) + result = funcgen.expr(op.result) + # XXX NULL returns? + return ('%s = (rpygcchar_t *)stm_allocate_weakref(%s); ' % (result, arg_size) + + '((rpyobj_t *)%s)->tid = %s;' % (result, arg_type_id)) + def stm_get_from_obj(funcgen, op): assert op.args[0].concretetype == llmemory.GCREF arg_obj = funcgen.expr(op.args[0]) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -304,6 +304,22 @@ data = cbuilder.cmdexec('a b') assert 'test ok\n' in data + def test_prebuilt_weakref(self): + import weakref + class Foo(object): + pass + foo = Foo(); foo.n = 42 + wr = weakref.ref(foo) + + def main(argv): + wr().n += 1 + print '<', wr().n, '>' + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert '< 43 >\n' in data + def test_stm_pointer_equal(self): class Foo: pass From noreply at buildbot.pypy.org Thu Mar 13 13:26:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 13:26:50 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/c4e8d6220b74. Add two missing files. Message-ID: <20140313122650.01DEE1C08C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69929:d5cd283ea582 Date: 2014-03-13 11:44 +0100 http://bitbucket.org/pypy/pypy/changeset/d5cd283ea582/ Log: import stmgc/c4e8d6220b74. Add two missing files. 
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -ae62acdb5d7c +c4e8d6220b74 diff --git a/rpython/translator/stm/src_stm/stm/weakref.c b/rpython/translator/stm/src_stm/stm/weakref.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/weakref.c @@ -0,0 +1,149 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((stm_char *)(wr)) + (sz) - sizeof(void*))) + +object_t *stm_allocate_weakref(ssize_t size_rounded_up) +{ + OPT_ASSERT(size_rounded_up > sizeof(struct object_s)); + OPT_ASSERT(size_rounded_up == 16); /* no reason for it to be anything else */ + + object_t *obj = stm_allocate(size_rounded_up); + assert(_is_in_nursery(obj)); /* because it's so small */ + + LIST_APPEND(STM_PSEGMENT->young_weakrefs, obj); + return obj; +} + + +object_t *stm_setup_prebuilt_weakref(object_t *obj) +{ + ssize_t size = 16; + + obj = stm_setup_prebuilt(obj); + *WEAKREF_PTR(obj, size) = stm_setup_prebuilt(*WEAKREF_PTR(obj, size)); + return obj; +} + + +static void _set_weakref_in_all_segments(object_t *weakref, object_t *value) +{ + ssize_t size = 16; + + stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); + if (flag_page_private[(uintptr_t)point_to_loc / 4096UL] == PRIVATE_PAGE) { + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + char *base = get_segment_base(i); /* two different segments */ + + object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); + *ref_loc = value; + } + } + else { + *WEAKREF_PTR(weakref, size) = value; + } +} + +/***** Minor collection *****/ + +static void stm_move_young_weakrefs() +{ + /* The code relies on the fact that no weakref can be an old object + weakly pointing to a young object. Indeed, weakrefs are immutable + so they cannot point to an object that was created after it. + */ + LIST_FOREACH_R( + STM_PSEGMENT->young_weakrefs, + object_t * /*item*/, + ({ + /* weakrefs are so small, they always are in the nursery. Never + a young outside nursery object. 
*/ + assert(_is_in_nursery(item)); + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + + /* the following checks are done like in nursery.c: */ + if (!(item->stm_flags & GCFLAG_HAS_SHADOW) + || (pforwarded_array[0] != GCWORD_MOVED)) { + /* weakref dies */ + continue; + } + + item = pforwarded_array[1]; /* moved location */ + + assert(!_is_young(item)); + + ssize_t size = 16; + object_t *pointing_to = *WEAKREF_PTR(item, size); + assert(pointing_to != NULL); + + if (_is_in_nursery(pointing_to)) { + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; + /* the following checks are done like in nursery.c: */ + if (!(pointing_to->stm_flags & GCFLAG_HAS_SHADOW) + || (pforwarded_array[0] != GCWORD_MOVED)) { + /* pointing_to dies */ + _set_weakref_in_all_segments(item, NULL); + continue; /* no need to remember in old_weakrefs */ + } + else { + /* moved location */ + _set_weakref_in_all_segments(item, pforwarded_array[1]); + } + } + else { + /* young outside nursery object or old object */ + if (tree_contains(STM_PSEGMENT->young_outside_nursery, + (uintptr_t)pointing_to)) { + /* still in the tree -> wasn't seen by the minor collection, + so it doesn't survive */ + _set_weakref_in_all_segments(item, NULL); + continue; /* no need to remember in old_weakrefs */ + } + /* pointing_to was already old */ + } + LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); + })); + list_clear(STM_PSEGMENT->young_weakrefs); +} + + +/***** Major collection *****/ + + +static void stm_visit_old_weakrefs(void) +{ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + struct list_s *lst; + + lst = pseg->old_weakrefs; + uintptr_t n = list_count(lst); + while (n > 0) { + object_t *weakref = (object_t *)list_item(lst, --n); + if (!mark_visited_test(weakref)) { + /* weakref dies */ + list_set_item(lst, n, list_pop_item(lst)); + continue; + } + + ssize_t size = 16; + object_t *pointing_to = *WEAKREF_PTR(weakref, size); + assert(pointing_to != NULL); + if (!mark_visited_test(pointing_to)) { + //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); + _set_weakref_in_all_segments(weakref, NULL); + + /* we don't need it in this list anymore */ + list_set_item(lst, n, list_pop_item(lst)); + continue; + } + else { + /* it survives! */ + } + } + } +} diff --git a/rpython/translator/stm/src_stm/stm/weakref.h b/rpython/translator/stm/src_stm/stm/weakref.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/weakref.h @@ -0,0 +1,10 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _SRCSTM_WEAKREF_H +#define _SRCSTM_WEAKREF_H + +object_t *stm_allocate_weakref(ssize_t size_rounded_up); +static void stm_move_young_weakrefs(void); +static void stm_visit_old_weakrefs(void); + + +#endif diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -283,6 +283,9 @@ static structure, but it should never be used anyway.) */ object_t *stm_setup_prebuilt(object_t *); +/* The same, if the prebuilt object is actually a weakref. */ +object_t *stm_setup_prebuilt_weakref(object_t *); + /* Hash, id. The id is just the address of the object (of the address where it *will* be after the next minor collection). 
The hash is the same, mangled -- except on prebuilt objects, where it can be From noreply at buildbot.pypy.org Thu Mar 13 13:26:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 13:26:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: (Remi, arigo) Prebuilt weakrefs. Message-ID: <20140313122651.3530F1C08C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69930:9b817ede3084 Date: 2014-03-13 11:56 +0100 http://bitbucket.org/pypy/pypy/changeset/9b817ede3084/ Log: (Remi, arigo) Prebuilt weakrefs. diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -873,13 +873,28 @@ h = database.gcpolicy.get_stm_prebuilt_hash(node.obj) print >> f, '\t%s,' % (name_signed(h, database),) print >> f, '};' + print >> f + print >> f, 'static int weakref_indices[] = {' + for i, (_, node) in enumerate(gclist): + if getattr(node, 'is_weakref', False): + print >> f, '\t%d,' % (i,) + print >> f, '\t-1' + print >> f, '};' print >> f, ''' void pypy_stm_setup_prebuilt(void) { object_t **pp = rpy_prebuilt; long *ph = rpy_prebuilt_hashes; - for ( ; *pp; pp++, ph++) { - *pp = stm_setup_prebuilt(*pp); + int i = 0; + int *wri = weakref_indices; + for ( ; *pp; pp++, ph++, i++) { + if (i == *wri) { + *pp = stm_setup_prebuilt_weakref(*pp); + wri++; + } + else { + *pp = stm_setup_prebuilt(*pp); + } stm_set_prebuilt_identityhash(*pp, *ph); } diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -592,7 +592,7 @@ class StructNode(ContainerNode): nodekind = 'struct' if USESLOTS: - __slots__ = () + __slots__ = ('is_weakref',) def basename(self): T = self.getTYPE() @@ -1029,7 +1029,9 @@ wrapper = db.gcpolicy.convert_weakref_to(ptarget) container = wrapper._obj #obj._converted_weakref = container # hack for genllvm :-/ - return db.getcontainernode(container, _dont_write_c_code=False) + node = db.getcontainernode(container, _dont_write_c_code=False) + node.is_weakref = True + return node class GroupNode(ContainerNode): nodekind = 'group' From noreply at buildbot.pypy.org Thu Mar 13 15:09:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 15:09:36 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Workaround for another clang issue Message-ID: <20140313140936.C53CA1C01FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69931:598224bb2b1c Date: 2014-03-13 15:08 +0100 http://bitbucket.org/pypy/pypy/changeset/598224bb2b1c/ Log: Workaround for another clang issue diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -949,7 +949,10 @@ # not untranslated-test-friendly, but good enough: we hide # our GC object inside an Address field. It's not a correct # "void *" address, as it's in the wrong address_space, but - # we will force_cast it again in weakref_deref(). + # we will force_cast it again in weakref_deref(). Note that + # it's done in two steps as a workaround for a clang issue(?). 
+ v_instance = hop.genop("force_cast", [v_instance], + resulttype=lltype.Signed) opname = "force_cast" else: opname = "cast_ptr_to_adr" @@ -968,6 +971,8 @@ resulttype=llmemory.Address) if self.translator.config.translation.stm: # see gct_weakref_create() + v_addr = hop.genop("force_cast", [v_addr], + resulttype=lltype.Signed) hop.genop("force_cast", [v_addr], resultvar=hop.spaceop.result) else: From noreply at buildbot.pypy.org Thu Mar 13 15:38:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 15:38:31 +0100 (CET) Subject: [pypy-commit] stmgc default: Another llvm "fix", in quotes, because it might be bogus. Message-ID: <20140313143831.D82A41D23C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r998:321767c428c6 Date: 2014-03-13 15:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/321767c428c6/ Log: Another llvm "fix", in quotes, because it might be bogus. If 'a' is a global with an address_space(256), then this fix allows us to use "(void *)(long)&a" in prebuilt structs. It looks bogus because "(void *)&a" still doesn't work, with clang giving the error "error: initializer element is not a compile-time constant". Too bad, we can always generate the first variant in pypy and be happy. diff --git a/c7/llvmfix/addrspacecast-in-constant.diff b/c7/llvmfix/addrspacecast-in-constant.diff new file mode 100644 --- /dev/null +++ b/c7/llvmfix/addrspacecast-in-constant.diff @@ -0,0 +1,13 @@ +Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp +=================================================================== +--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp (revision 203791) ++++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp (working copy) +@@ -1529,6 +1529,8 @@ + Ctx); + } + ++ case Instruction::AddrSpaceCast: ++ // XXX??? I *think* it is correct at least in PyPy's case + case Instruction::Trunc: + // We emit the value and depend on the assembler to truncate the generated + // expression properly. 
This is important for differences between From noreply at buildbot.pypy.org Thu Mar 13 15:41:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 15:41:19 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix to match 321767c428c6 in the stmgc repo Message-ID: <20140313144119.B96A41D2351@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69932:1f2c5fb9f1ad Date: 2014-03-13 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/1f2c5fb9f1ad/ Log: Fix to match 321767c428c6 in the stmgc repo diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -170,7 +170,12 @@ def name_address(value, db): if value: - return '((void *)%s)' % (db.get(value.ref(), static=True),) + res = db.get(value.ref(), static=True) + if res == db.get(value.ref(), static=False): + return res # common case + else: + # mess with stm address spaces + return '((void *)(long)%s)' % (res,) else: return 'NULL' From noreply at buildbot.pypy.org Thu Mar 13 16:48:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 16:48:43 +0100 (CET) Subject: [pypy-commit] pypy default: Add a @signature to this function too Message-ID: <20140313154843.6FE9C1D23CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69933:6c99fe082851 Date: 2014-03-13 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/6c99fe082851/ Log: Add a @signature to this function too diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -108,6 +108,8 @@ copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) @jit.dont_look_inside + @signature(types.any(), types.any(), types.int(), types.int(), + returns=types.none()) def copy_raw_to_string(ptrsrc, dst, dststart, length): # xxx Warning: same note as above apply: don't do this at home assert length >= 0 From noreply at buildbot.pypy.org Thu Mar 13 19:51:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 19:51:38 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140313185138.321331C347E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69934:1b58f7c6b75c Date: 2014-03-13 19:50 +0100 http://bitbucket.org/pypy/pypy/changeset/1b58f7c6b75c/ Log: Fix diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -71,8 +71,7 @@ v_struct = hop.spaceop.args[0] assert opname in ('setfield', 'setarrayitem', 'setinteriorfield', 'raw_store') - if (v_struct.concretetype.TO._gckind == "gc" - and hop.spaceop not in self.clean_sets): + if var_needsgc(v_struct) and hop.spaceop not in self.clean_sets: if self.in_stm_ignored: # detect if we're inside a 'stm_ignored' block and in # that case don't call stm_write(). 
This only works for From noreply at buildbot.pypy.org Thu Mar 13 20:55:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 20:55:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix rrawarray for stm Message-ID: <20140313195520.16E271C347E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69935:286aec3bffff Date: 2014-03-13 20:52 +0100 http://bitbucket.org/pypy/pypy/changeset/286aec3bffff/ Log: Fix rrawarray for stm diff --git a/rpython/rlib/rrawarray.py b/rpython/rlib/rrawarray.py --- a/rpython/rlib/rrawarray.py +++ b/rpython/rlib/rrawarray.py @@ -1,6 +1,6 @@ from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.objectmodel import specialize -from rpython.rlib import jit +from rpython.rlib import jit, rgc from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.tool.pairtype import pair @@ -50,6 +50,10 @@ @jit.dont_look_inside def ll_copy_list_to_raw_array(ll_list, dst_ptr): + if rgc.stm_is_enabled(): + for i in range(ll_list.ll_length()): + dst_ptr[i] = ll_list.ll_getitem_fast(i) + return # this code is delicate: we must ensure that there are no GC operations # around the call to raw_memcopy # @@ -64,9 +68,13 @@ @jit.dont_look_inside def ll_populate_list_from_raw_array(ll_list, src_ptr, length): + ll_list._ll_resize(length) + if rgc.stm_is_enabled(): + for i in range(length): + ll_list.ll_setitem_fast(i, src_ptr[i]) + return ITEM = lltype.typeOf(src_ptr).TO.OF size = llmemory.sizeof(ITEM) * length - ll_list._ll_resize(length) # start of no-GC section src_adr = get_raw_buf(src_ptr) dst_adr = get_raw_buf(ll_list.ll_items()) diff --git a/rpython/rlib/test/test_rrawarray.py b/rpython/rlib/test/test_rrawarray.py --- a/rpython/rlib/test/test_rrawarray.py +++ b/rpython/rlib/test/test_rrawarray.py @@ -2,11 +2,24 @@ populate_list_from_raw_array from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.test.test_llinterp import clear_tcache +from rpython.rlib import rgc class TestRArray(BaseRtypingTest): + def interpret_twice(self, f, args): + self.interpret(f, args) + # + old = rgc.stm_is_enabled + try: + rgc.stm_is_enabled = lambda: True + clear_tcache() + self.interpret(f, args) + finally: + rgc.stm_is_enabled = old + def test_copy_list_to_raw_array(self): ARRAY = rffi.CArray(lltype.Signed) buf = lltype.malloc(ARRAY, 4, flavor='raw') @@ -35,7 +48,7 @@ # lltype.free(buf, flavor='raw') lltype.free(buf2, flavor='raw') - self.interpret(fn, []) + self.interpret_twice(fn, []) def test_new_list_from_raw_array(self): INTARRAY = rffi.CArray(lltype.Signed) @@ -62,4 +75,4 @@ assert lst == [1, 2, 3, 4] lltype.free(buf, flavor='raw') # - self.interpret(fn, []) + self.interpret_twice(fn, []) From noreply at buildbot.pypy.org Thu Mar 13 20:55:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 20:55:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140313195521.4ECE51C347E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69936:1329620f4b26 Date: 2014-03-13 20:53 +0100 http://bitbucket.org/pypy/pypy/changeset/1329620f4b26/ Log: merge heads diff --git a/rpython/rlib/rrawarray.py b/rpython/rlib/rrawarray.py --- a/rpython/rlib/rrawarray.py +++ b/rpython/rlib/rrawarray.py @@ -1,6 +1,6 @@ from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.objectmodel import specialize -from rpython.rlib import jit 
+from rpython.rlib import jit, rgc from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.tool.pairtype import pair @@ -50,6 +50,10 @@ @jit.dont_look_inside def ll_copy_list_to_raw_array(ll_list, dst_ptr): + if rgc.stm_is_enabled(): + for i in range(ll_list.ll_length()): + dst_ptr[i] = ll_list.ll_getitem_fast(i) + return # this code is delicate: we must ensure that there are no GC operations # around the call to raw_memcopy # @@ -64,9 +68,13 @@ @jit.dont_look_inside def ll_populate_list_from_raw_array(ll_list, src_ptr, length): + ll_list._ll_resize(length) + if rgc.stm_is_enabled(): + for i in range(length): + ll_list.ll_setitem_fast(i, src_ptr[i]) + return ITEM = lltype.typeOf(src_ptr).TO.OF size = llmemory.sizeof(ITEM) * length - ll_list._ll_resize(length) # start of no-GC section src_adr = get_raw_buf(src_ptr) dst_adr = get_raw_buf(ll_list.ll_items()) diff --git a/rpython/rlib/test/test_rrawarray.py b/rpython/rlib/test/test_rrawarray.py --- a/rpython/rlib/test/test_rrawarray.py +++ b/rpython/rlib/test/test_rrawarray.py @@ -2,11 +2,24 @@ populate_list_from_raw_array from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.test.test_llinterp import clear_tcache +from rpython.rlib import rgc class TestRArray(BaseRtypingTest): + def interpret_twice(self, f, args): + self.interpret(f, args) + # + old = rgc.stm_is_enabled + try: + rgc.stm_is_enabled = lambda: True + clear_tcache() + self.interpret(f, args) + finally: + rgc.stm_is_enabled = old + def test_copy_list_to_raw_array(self): ARRAY = rffi.CArray(lltype.Signed) buf = lltype.malloc(ARRAY, 4, flavor='raw') @@ -35,7 +48,7 @@ # lltype.free(buf, flavor='raw') lltype.free(buf2, flavor='raw') - self.interpret(fn, []) + self.interpret_twice(fn, []) def test_new_list_from_raw_array(self): INTARRAY = rffi.CArray(lltype.Signed) @@ -62,4 +75,4 @@ assert lst == [1, 2, 3, 4] lltype.free(buf, flavor='raw') # - self.interpret(fn, []) + self.interpret_twice(fn, []) From noreply at buildbot.pypy.org Thu Mar 13 21:00:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Mar 2014 21:00:26 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix from last merge Message-ID: <20140313200026.DA4D21D23CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69937:6050dbbb47e6 Date: 2014-03-13 12:58 -0700 http://bitbucket.org/pypy/pypy/changeset/6050dbbb47e6/ Log: fix from last merge diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -288,7 +288,6 @@ RegrTest('test_nis.py'), RegrTest('test_nntplib.py'), RegrTest('test_normalization.py'), - RegrTest('test_nntplib.py'), RegrTest('test_ntpath.py'), RegrTest('test_numeric_tower.py'), RegrTest('test_opcodes.py', core=True), @@ -405,7 +404,7 @@ RegrTest('test_timeout.py'), RegrTest('test_tk.py'), RegrTest('test_tokenize.py'), - RegrTest('test_tools.py'), + #RegrTest('test_tools.py'), RegrTest('test_trace.py'), RegrTest('test_traceback.py', core=True), RegrTest('test_ttk_guionly.py'), From noreply at buildbot.pypy.org Thu Mar 13 22:29:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 22:29:38 +0100 (CET) Subject: [pypy-commit] stmgc default: extend this llvm fix to another function too Message-ID: <20140313212938.13C001D23CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r999:00f803e7c734 Date: 2014-03-13 22:24 +0100 
http://bitbucket.org/pypy/stmgc/changeset/00f803e7c734/ Log: extend this llvm fix to another function too diff --git a/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff --- a/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff +++ b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff @@ -1,8 +1,8 @@ Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp =================================================================== ---- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (revision 199602) +--- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (revision 203791) +++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (working copy) -@@ -295,6 +295,17 @@ +@@ -299,6 +299,17 @@ Type *SrcPTy = SrcTy->getElementType(); @@ -20,3 +20,16 @@ if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() || DestPTy->isVectorTy()) { // If the source is an array, the code below will not succeed. Check to +@@ -510,6 +521,12 @@ + IC.getDataLayout()->getTypeSizeInBits(DestPTy)) + return 0; + ++ // XXX this is similar to the issue in InstCombineLoadCast ++ if (SrcPTy->isPointerTy() && DestPTy->isPointerTy() && ++ cast(DestPTy)->getAddressSpace() != ++ cast(SrcPTy)->getAddressSpace()) ++ return 0; ++ + // Okay, we are casting from one integer or pointer type to another of + // the same size. Instead of casting the pointer before + // the store, cast the value to be stored. From noreply at buildbot.pypy.org Thu Mar 13 23:05:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 13 Mar 2014 23:05:44 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140313220544.F27731C347E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69938:be547e30df19 Date: 2014-03-13 18:01 -0400 http://bitbucket.org/pypy/pypy/changeset/be547e30df19/ Log: cleanup diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -58,10 +58,10 @@ elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, - backstrides, storage=storage) + backstrides, storage=storage) else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, - backstrides, storage) + backstrides, storage) if w_subtype: w_ret = space.allocate_instance(W_NDimArray, w_subtype) W_NDimArray.__init__(w_ret, impl) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -87,7 +87,8 @@ value[0] = self.value builder = StringBuilder() - builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.value))) + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), + rffi.sizeof(lltype.typeOf(self.value))) ret = builder.build() lltype.free(value, flavor="raw") @@ -117,7 +118,8 @@ value[1] = self.imag builder = StringBuilder() - builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.real)) * 2) + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), + rffi.sizeof(lltype.typeOf(self.real)) * 2) ret = builder.build() lltype.free(value, flavor="raw") @@ -186,27 +188,27 @@ dtype = self.get_dtype(space) return space.wrap(dtype.itemtype.bool(self)) + def _unaryop_impl(ufunc_name): + def impl(self, space, w_out=None): + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_out]) + return 
func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [w_other, self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) - def _unaryop_impl(ufunc_name): - def impl(self, space, w_out=None): - from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_out]) - return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) - descr_add = _binop_impl("add") descr_sub = _binop_impl("subtract") descr_mul = _binop_impl("multiply") From noreply at buildbot.pypy.org Thu Mar 13 23:05:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 13 Mar 2014 23:05:46 +0100 (CET) Subject: [pypy-commit] pypy default: export another constant Message-ID: <20140313220546.5540B1C347E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69939:704d2849814c Date: 2014-03-12 17:39 -0400 http://bitbucket.org/pypy/pypy/changeset/704d2849814c/ Log: export another constant diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -24,7 +24,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } - for c in ['CLIP', 'WRAP', 'RAISE']: + for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -1,3 +1,5 @@ +MAXDIMS = 32 + BOOL = 0 BYTE = 1 UBYTE = 2 diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -248,6 +248,7 @@ def test_constants(self): import numpy as np + assert np.MAXDIMS is 32 assert np.CLIP is 0 assert np.WRAP is 1 assert np.RAISE is 2 From noreply at buildbot.pypy.org Thu Mar 13 23:05:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 13 Mar 2014 23:05:47 +0100 (CET) Subject: [pypy-commit] pypy default: avoid loops in numpy array creation Message-ID: <20140313220547.99D641C347E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69940:bb85257cb544 Date: 2014-03-13 15:17 -0400 http://bitbucket.org/pypy/pypy/changeset/bb85257cb544/ Log: avoid loops in numpy array creation diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -5,7 +5,6 @@ from pypy.module.micronumpy import descriptor, loop, ufuncs from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter -from pypy.module.micronumpy.strides import find_shape_and_elems def build_scalar(space, w_dtype, 
w_state): @@ -27,6 +26,8 @@ @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, ndmin=0): + from pypy.module.micronumpy import strides + # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): w___array__ = space.lookup(w_object, "__array__") @@ -68,12 +69,9 @@ return w_ret # not an array or incorrect dtype - shape, elems_w = find_shape_and_elems(space, w_object, dtype) + shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - for w_elem in elems_w: - if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): - w_elem = w_elem.get_scalar_value() - dtype = ufuncs.find_dtype_for_scalar(space, w_elem, dtype) + dtype = strides.find_dtype_for_seq(space, elems_w, dtype) if dtype is None: dtype = descriptor.get_dtype_cache(space).w_float64dtype elif dtype.is_str_or_unicode() and dtype.elsize < 1: @@ -83,10 +81,10 @@ if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - arr_iter = w_arr.create_iter() - for w_elem in elems_w: - arr_iter.setitem(dtype.coerce(space, w_elem)) - arr_iter.next() + if len(elems_w) == 1: + w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) + else: + loop.assign(space, w_arr, elems_w) return w_arr diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -164,6 +164,13 @@ arr_iter.setitem(box) arr_iter.next() +def assign(space, arr, seq): + arr_iter = arr.create_iter() + arr_dtype = arr.get_dtype() + for item in seq: + arr_iter.setitem(arr_dtype.coerce(space, item)) + arr_iter.next() + where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -181,6 +181,10 @@ return [], [w_iterable] if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): return [], [w_iterable] + return _find_shape_and_elems(space, w_iterable, is_rec_type) + + +def _find_shape_and_elems(space, w_iterable, is_rec_type): shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) while True: @@ -210,6 +214,25 @@ batch = new_batch +def find_dtype_for_seq(space, elems_w, dtype): + from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar + if len(elems_w) == 1: + w_elem = elems_w[0] + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + return find_dtype_for_scalar(space, w_elem, dtype) + return _find_dtype_for_seq(space, elems_w, dtype) + + +def _find_dtype_for_seq(space, elems_w, dtype): + from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar + for w_elem in elems_w: + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + dtype = find_dtype_for_scalar(space, w_elem, dtype) + return dtype + + def to_coords(space, shape, size, order, w_item_or_slice): '''Returns a start coord, step, and length. 
''' From noreply at buildbot.pypy.org Thu Mar 13 23:21:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Mar 2014 23:21:03 +0100 (CET) Subject: [pypy-commit] pypy default: Small updates Message-ID: <20140313222103.0304D1D2626@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69941:2a1dbb0f0d8a Date: 2014-03-13 23:20 +0100 http://bitbucket.org/pypy/pypy/changeset/2a1dbb0f0d8a/ Log: Small updates diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -187,7 +187,7 @@ No, we found no way of doing that. The JIT generates machine code containing a large number of constant addresses --- constant at the time -the machine code is written. The vast majority is probably not at all +the machine code is generated. The vast majority is probably not at all constants that you find in the executable, with a nice link name. E.g. the addresses of Python classes are used all the time, but Python classes don't come statically from the executable; they are created anew @@ -212,12 +212,16 @@ garbage collection, implementation of various things like arbitrarily long integers, etc. -Currently, we have preliminary versions of a JavaScript interpreter -(Leonardo Santagada as his Summer of PyPy project), a `Prolog interpreter`_ -(Carl Friedrich Bolz as his Bachelor thesis), and a `SmallTalk interpreter`_ +Currently, we have `Topaz`_, a Ruby interpreter; `Hippy`_, a PHP +interpreter; preliminary versions of a `JavaScript interpreter`_ +(Leonardo Santagada as his Summer of PyPy project); a `Prolog interpreter`_ +(Carl Friedrich Bolz as his Bachelor thesis); and a `SmallTalk interpreter`_ (produced during a sprint). On the `PyPy bitbucket page`_ there is also a Scheme and an Io implementation; both of these are unfinished at the moment. +.. _`Topaz`: http://topazruby.com/ +.. _`Hippy`: http://morepypy.blogspot.ch/2012/07/hello-everyone.html +.. _`JavaScript interpreter`: https://bitbucket.org/pypy/lang-js/ .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`SmallTalk interpreter`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. 
_`PyPy bitbucket page`: https://bitbucket.org/pypy/ From noreply at buildbot.pypy.org Thu Mar 13 23:52:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 13 Mar 2014 23:52:35 +0100 (CET) Subject: [pypy-commit] pypy default: only set self.storage once here Message-ID: <20140313225235.6D23E1C01FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69942:154545271aa6 Date: 2014-03-13 18:36 -0400 http://bitbucket.org/pypy/pypy/changeset/154545271aa6/ Log: only set self.storage once here diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -368,13 +368,10 @@ class ConcreteArray(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE)): - null_storage = lltype.nullptr(RAW_STORAGE) + if storage == lltype.nullptr(RAW_STORAGE): + storage = dtype.itemtype.malloc(support.product(shape) * dtype.elsize) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, - null_storage) - if storage == lltype.nullptr(RAW_STORAGE): - self.storage = dtype.itemtype.malloc(self.size) - else: - self.storage = storage + storage) def __del__(self): free_raw_storage(self.storage, track_allocation=False) From noreply at buildbot.pypy.org Fri Mar 14 00:04:59 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Mar 2014 00:04:59 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: handle None in op inputs Message-ID: <20140313230459.E70521C01FC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r69943:926c252e5f8e Date: 2014-03-14 00:59 +0200 http://bitbucket.org/pypy/pypy/changeset/926c252e5f8e/ Log: handle None in op inputs diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -7,6 +7,7 @@ shape_agreement_multiple) from pypy.module.micronumpy.iterators import ArrayIter, SliceIterator from pypy.module.micronumpy.concrete import SliceArray +from pypy.module.micronumpy import ufuncs class AbstractIterator(object): @@ -34,6 +35,9 @@ def getitem(self, space, array): return self.op_flags.get_it_item[self.index](space, array, self.it) + def setitem(self, space, array, val): + xxx + class BoxIterator(IteratorMixin, AbstractIterator): index = 0 @@ -220,6 +224,15 @@ return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) +def convert_to_array_or_none(space, w_elem): + ''' + None will be passed through, all others will be converted + ''' + if space.is_none(w_elem): + return None + return convert_to_array(space, w_elem) + + class IndexIterator(object): def __init__(self, shape, backward=False): self.shape = shape @@ -267,7 +280,7 @@ if space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) - self.seq = [convert_to_array(space, w_elem) for w_elem in w_seq_as_list] + self.seq = [convert_to_array_or_none(space, w_elem) for w_elem in w_seq_as_list] else: self.seq =[convert_to_array(space, w_seq)] parse_func_flags(space, self, w_flags) @@ -280,6 +293,25 @@ 'nditer op_dtypes kwarg not implemented yet')) self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) + outarg = [i for i in range(len(self.seq)) if self.seq[i] is None] + if len(outarg) > 0: + # Make None operands writeonly and flagged for + # allocation, and 
everything else defaults to readonly. To write + # to a provided operand, you must specify the write flag manually. + out_dtype = None + for elem in self.seq: + if elem is None: + continue + if isinstance(elem, W_NDimArray) and elem.is_scalar(): + elem = elem.get_scalar_value() + out_dtype = ufuncs.find_binop_result_dtype(space, + elem.get_dtype(), out_dtype) + for i in outarg: + self.op_flags[i].get_it_item = (get_readwrite_item, + get_readwrite_slice) + self.op_flags[i].allocate = True + # XXX can we postpone allocation to later? + self.seq[i] = W_NDimArray.from_shape(space, iter_shape, out_dtype) if self.tracked_index != "": if self.order == "K": self.order = self.seq[0].implementation.order From noreply at buildbot.pypy.org Fri Mar 14 00:05:01 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Mar 2014 00:05:01 +0100 (CET) Subject: [pypy-commit] pypy numpypy-nditer: cleanup Message-ID: <20140313230501.751A01C01FC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r69944:a51c458926c0 Date: 2014-03-14 01:03 +0200 http://bitbucket.org/pypy/pypy/changeset/a51c458926c0/ Log: cleanup diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -302,8 +302,6 @@ for elem in self.seq: if elem is None: continue - if isinstance(elem, W_NDimArray) and elem.is_scalar(): - elem = elem.get_scalar_value() out_dtype = ufuncs.find_binop_result_dtype(space, elem.get_dtype(), out_dtype) for i in outarg: From noreply at buildbot.pypy.org Fri Mar 14 01:55:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 14 Mar 2014 01:55:34 +0100 (CET) Subject: [pypy-commit] pypy py3k: adjust the test as we follow 3.2.4? behavior now Message-ID: <20140314005534.DD19D1D2351@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69945:5e094da24d56 Date: 2014-03-13 14:29 -0700 http://bitbucket.org/pypy/pypy/changeset/5e094da24d56/ Log: adjust the test as we follow 3.2.4? 
behavior now diff --git a/lib-python/3/test/test_codeccallbacks.py b/lib-python/3/test/test_codeccallbacks.py --- a/lib-python/3/test/test_codeccallbacks.py +++ b/lib-python/3/test/test_codeccallbacks.py @@ -262,12 +262,12 @@ self.assertEqual( b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1"), - "\u3042[<92><117><51><120>]xx" + "\u3042[<92><117><51>]xxx" ) self.assertEqual( b"\\u3042\u3xx".decode("unicode-escape", "test.handler1"), - "\u3042[<92><117><51><120><120>]" + "\u3042[<92><117><51>]xx" ) self.assertEqual( From noreply at buildbot.pypy.org Fri Mar 14 05:13:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 14 Mar 2014 05:13:42 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140314041342.811411D23CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69946:25f71fd3b631 Date: 2014-03-13 21:12 -0700 http://bitbucket.org/pypy/pypy/changeset/25f71fd3b631/ Log: merge default diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -51,16 +51,22 @@ self.config = config self.logfile = logfile # preferably line buffered - def write_log_entry(self, testpath, lettercode, longrepr): + def write_log_entry(self, testpath, lettercode, longrepr, sections=[]): py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) + for key, text in sections: + py.builtin.print_(" ", file=self.logfile) + py.builtin.print_(" -------------------- %s --------------------" + % key.rstrip(), file=self.logfile) + py.builtin.print_(" %s" % (text.rstrip().replace('\n', '\n '),), + file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) if testpath is None: testpath = report.fspath - self.write_log_entry(testpath, lettercode, longrepr) + self.write_log_entry(testpath, lettercode, longrepr, report.sections) def pytest_runtest_logreport(self, report): if report.when != "call" and report.passed: diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -187,7 +187,7 @@ No, we found no way of doing that. The JIT generates machine code containing a large number of constant addresses --- constant at the time -the machine code is written. The vast majority is probably not at all +the machine code is generated. The vast majority is probably not at all constants that you find in the executable, with a nice link name. E.g. the addresses of Python classes are used all the time, but Python classes don't come statically from the executable; they are created anew @@ -212,12 +212,16 @@ garbage collection, implementation of various things like arbitrarily long integers, etc. -Currently, we have preliminary versions of a JavaScript interpreter -(Leonardo Santagada as his Summer of PyPy project), a `Prolog interpreter`_ -(Carl Friedrich Bolz as his Bachelor thesis), and a `SmallTalk interpreter`_ +Currently, we have `Topaz`_, a Ruby interpreter; `Hippy`_, a PHP +interpreter; preliminary versions of a `JavaScript interpreter`_ +(Leonardo Santagada as his Summer of PyPy project); a `Prolog interpreter`_ +(Carl Friedrich Bolz as his Bachelor thesis); and a `SmallTalk interpreter`_ (produced during a sprint). On the `PyPy bitbucket page`_ there is also a Scheme and an Io implementation; both of these are unfinished at the moment. +.. _`Topaz`: http://topazruby.com/ +.. 
_`Hippy`: http://morepypy.blogspot.ch/2012/07/hello-everyone.html +.. _`JavaScript interpreter`: https://bitbucket.org/pypy/lang-js/ .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`SmallTalk interpreter`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`PyPy bitbucket page`: https://bitbucket.org/pypy/ diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -24,7 +24,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } - for c in ['CLIP', 'WRAP', 'RAISE']: + for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -58,10 +58,10 @@ elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, - backstrides, storage=storage) + backstrides, storage=storage) else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, - backstrides, storage) + backstrides, storage) if w_subtype: w_ret = space.allocate_instance(W_NDimArray, w_subtype) W_NDimArray.__init__(w_ret, impl) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -87,7 +87,8 @@ value[0] = self.value builder = StringBuilder() - builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.value))) + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), + rffi.sizeof(lltype.typeOf(self.value))) ret = builder.build() lltype.free(value, flavor="raw") @@ -117,7 +118,8 @@ value[1] = self.imag builder = StringBuilder() - builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.real)) * 2) + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), + rffi.sizeof(lltype.typeOf(self.real)) * 2) ret = builder.build() lltype.free(value, flavor="raw") @@ -179,27 +181,27 @@ dtype = self.get_dtype(space) return space.wrap(dtype.itemtype.bool(self)) + def _unaryop_impl(ufunc_name): + def impl(self, space, w_out=None): + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_out]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [w_other, self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) - def _unaryop_impl(ufunc_name): - def impl(self, space, w_out=None): - from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_out]) - return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) - descr_add = _binop_impl("add") descr_sub = _binop_impl("subtract") descr_mul = 
_binop_impl("multiply") diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -368,13 +368,10 @@ class ConcreteArray(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE)): - null_storage = lltype.nullptr(RAW_STORAGE) + if storage == lltype.nullptr(RAW_STORAGE): + storage = dtype.itemtype.malloc(support.product(shape) * dtype.elsize) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, - null_storage) - if storage == lltype.nullptr(RAW_STORAGE): - self.storage = dtype.itemtype.malloc(self.size) - else: - self.storage = storage + storage) def __del__(self): free_raw_storage(self.storage, track_allocation=False) diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -1,3 +1,5 @@ +MAXDIMS = 32 + BOOL = 0 BYTE = 1 UBYTE = 2 diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -5,7 +5,6 @@ from pypy.module.micronumpy import descriptor, loop, ufuncs from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter -from pypy.module.micronumpy.strides import find_shape_and_elems def build_scalar(space, w_dtype, w_state): @@ -27,6 +26,8 @@ @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, ndmin=0): + from pypy.module.micronumpy import strides + # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): w___array__ = space.lookup(w_object, "__array__") @@ -68,12 +69,9 @@ return w_ret # not an array or incorrect dtype - shape, elems_w = find_shape_and_elems(space, w_object, dtype) + shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - for w_elem in elems_w: - if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): - w_elem = w_elem.get_scalar_value() - dtype = ufuncs.find_dtype_for_scalar(space, w_elem, dtype) + dtype = strides.find_dtype_for_seq(space, elems_w, dtype) if dtype is None: dtype = descriptor.get_dtype_cache(space).w_float64dtype elif dtype.is_str_or_unicode() and dtype.elsize < 1: @@ -83,10 +81,10 @@ if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - arr_iter = w_arr.create_iter() - for w_elem in elems_w: - arr_iter.setitem(dtype.coerce(space, w_elem)) - arr_iter.next() + if len(elems_w) == 1: + w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) + else: + loop.assign(space, w_arr, elems_w) return w_arr diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -164,6 +164,13 @@ arr_iter.setitem(box) arr_iter.next() +def assign(space, arr, seq): + arr_iter = arr.create_iter() + arr_dtype = arr.get_dtype() + for item in seq: + arr_iter.setitem(arr_dtype.coerce(space, item)) + arr_iter.next() + where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- 
a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -181,6 +181,10 @@ return [], [w_iterable] if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): return [], [w_iterable] + return _find_shape_and_elems(space, w_iterable, is_rec_type) + + +def _find_shape_and_elems(space, w_iterable, is_rec_type): shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) while True: @@ -210,6 +214,25 @@ batch = new_batch +def find_dtype_for_seq(space, elems_w, dtype): + from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar + if len(elems_w) == 1: + w_elem = elems_w[0] + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + return find_dtype_for_scalar(space, w_elem, dtype) + return _find_dtype_for_seq(space, elems_w, dtype) + + +def _find_dtype_for_seq(space, elems_w, dtype): + from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar + for w_elem in elems_w: + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + dtype = find_dtype_for_scalar(space, w_elem, dtype) + return dtype + + def to_coords(space, shape, size, order, w_item_or_slice): '''Returns a start coord, step, and length. ''' diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -248,6 +248,7 @@ def test_constants(self): import numpy as np + assert np.MAXDIMS is 32 assert np.CLIP is 0 assert np.WRAP is 1 assert np.RAISE is 2 diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -108,6 +108,8 @@ copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) @jit.dont_look_inside + @signature(types.any(), types.any(), types.int(), types.int(), + returns=types.none()) def copy_raw_to_string(ptrsrc, dst, dststart, length): # xxx Warning: same note as above apply: don't do this at home assert length >= 0 From noreply at buildbot.pypy.org Fri Mar 14 07:50:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 07:50:40 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Remove two outdated files Message-ID: <20140314065040.DB4291C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69947:eb70718a0080 Date: 2014-03-14 07:49 +0100 http://bitbucket.org/pypy/pypy/changeset/eb70718a0080/ Log: Remove two outdated files diff --git a/rpython/translator/stm/src_stm/stm/bucket.c b/rpython/translator/stm/src_stm/stm/bucket.c deleted file mode 100644 --- a/rpython/translator/stm/src_stm/stm/bucket.c +++ /dev/null @@ -1,16 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ -#define NUM_BUCKETS 93 - -static struct list_s *debug_seen_buckets[NUM_BUCKETS]; - -static bool debug_add_seen(object_t *obj) -{ - long n = ((uintptr_t)obj) % NUM_BUCKETS; - struct list_s *lst = debug_seen_buckets[n]; - long i; - for (i = list_count(lst); i--; ) - if (list_item(lst, i) == (uintptr_t)obj) - return false; - LIST_APPEND(debug_seen_buckets[n], obj); - return true; -} diff --git a/rpython/translator/stm/src_stm/stm/debugcheck.c b/rpython/translator/stm/src_stm/stm/debugcheck.c deleted file mode 100644 --- a/rpython/translator/stm/src_stm/stm/debugcheck.c +++ /dev/null @@ -1,144 +0,0 @@ -/* Imported by rpython/translator/stm/import_stmgc.py */ -#ifndef _STM_CORE_H_ -# error "must be 
compiled via stmgc.c" -#endif - - -#define DEBUG_SEEN_NO '.' -#define DEBUG_SEEN_TAIL '#' - -static char *debug_seen; - - -static void debug_object(object_t *obj) -{ - if (obj == NULL) - return; - - assert(((uintptr_t)obj & 7) == 0); - uintptr_t rmindex = (((uintptr_t)obj) >> 4) - READMARKER_START; - assert(rmindex < READMARKER_END - READMARKER_START); - if (debug_seen[rmindex] != DEBUG_SEEN_NO) { - assert(debug_seen[rmindex] == (uintptr_t)obj & 0x0f); - return; - } - debug_seen[rmindex++] = (uintptr_t)obj & 0x0f; - - char *realobj0 = (char *)REAL_ADDRESS(stm_object_pages, obj); - ssize_t size = stmcb_size_rounded_up(realobj0); - assert(size >= 16); - assert((size & 7) == 0); - while (rmindex < (((uintptr_t)obj + size) >> 4) - READMARKER_START) { - assert(debug_seen[rmindex] = DEBUG_SEEN_NO); - debug_seen[rmindex++] = DEBUG_SEEN_TAIL; - } - - bool small_uniform = false; - - uintptr_t first_page = ((uintptr_t)obj) / 4096; - assert(first_page >= FIRST_OBJECT_PAGE); - assert(first_page < NB_PAGES - 1); - - if (first_page < END_NURSERY_PAGE) { - assert(_is_in_nursery(obj)); - - /* object must be within the allocated part of the nursery */ - uintptr_t nursofs = ((uintptr_t)obj) - FIRST_NURSERY_PAGE * 4096UL; - assert(nursofs < nursery_ctl.used); - } - else { - assert(!_is_in_nursery(obj)); - - if (realobj0 < uninitialized_page_start) { - /* a large object */ - assert(realobj0 + size <= uninitialized_page_start); - } - else { - /* a small object in a uniform page */ - small_uniform = true; - assert(realobj0 >= uninitialized_page_stop); - assert((uintptr_t)obj + size <= (NB_PAGES - 1) * 4096UL); - } - } - - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - ...; - } - //...; -} - -static void debug_check_roots(void) -{ - stm_thread_local_t *tl = stm_thread_locals; - do { - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; - while (current-- != base) { - debug_object(*current); - } - tl = tl->next; - } while (tl != stm_thread_locals); -} - -static void debug_check_segments(void) -{ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - struct stm_priv_segment_info_t *pseg = get_priv_segment(i); - - assert(pseg->pub.segment_num == i); - assert(pseg->pub.segment_base == get_segment_base(i)); - - if (pseg->pub.nursery_current == NULL) { - assert(pseg->real_nursery_section_end == NULL); - } - else { - assert(pseg->real_nursery_section_end != NULL); - assert((pseg->real_nursery_section_end & NURSERY_LINE) == 0); - assert((uintptr_t)(pseg->real_nursery_section_end - - (uintptr_t)pseg->pub.nursery_current) - <= NURSERY_SECTION_SIZE); - } - assert(pseg->pub.v_nursery_section_end == - pseg->real_nursery_section_end || - pseg->pub.v_nursery_section_end == NSE_SIGNAL || - pseg->pub.v_nursery_section_end == NSE_SIGNAL_DONE); - - assert((pseg->pub.running_thread != NULL) == - (pseg->transaction_state != TS_NONE)); - - if (pseg->transaction_state != TS_NONE) { - assert(1 <= pseg->min_read_version_outside_nursery); - assert(pseg->min_read_version_outside_nursery <= - pseg->pub.transaction_read_version); - } - } -} - -void stm_debug_check_objects(void) -{ - /* Not thread-safe! - - Check the consistency of every object reachable from the roots, - the pages, the global allocation variables, the various markers, - and so on. - - Reading this is probably a good way to learn about all the - implicit invariants. 
- */ - - debug_seen = malloc(READMARKER_END - READMARKER_START); - memset(debug_seen, DEBUG_SEEN_NO, READMARKER_END - READMARKER_START); - - /* Check the segment state */ - debug_check_segments(); - - /* Follow objects from the roots */ - debug_check_roots(); - - - - free(debug_seen); - debug_seen = NULL; -} From noreply at buildbot.pypy.org Fri Mar 14 08:01:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:01:22 +0100 (CET) Subject: [pypy-commit] stmgc default: "Implement" the recent addition to the c7 interface, with XXXes Message-ID: <20140314070122.BFA7C1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1000:3281d387fbf5 Date: 2014-03-14 08:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/3281d387fbf5/ Log: "Implement" the recent addition to the c7 interface, with XXXes diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c --- a/gil-c7/stmgc.c +++ b/gil-c7/stmgc.c @@ -167,6 +167,11 @@ (char *)obj < _stm_nursery_end); } +long stm_can_move(object_t *obj) +{ + return _is_in_nursery(obj); +} + #define GCWORD_MOVED ((object_t *) -42) static void minor_trace_if_young(object_t **pobj) @@ -264,8 +269,9 @@ memset(_stm_nursery_base, 0, NURSERY_SIZE); } -void do_minor_collect(void) +void stm_collect(long level) { + /* 'level' is ignored, only minor collections are implemented */ collect_roots_in_nursery(); collect_oldrefs_to_nursery(); throw_away_nursery(); @@ -275,7 +281,7 @@ { /* run minor collection */ //fprintf(stderr, "minor collect\n"); - do_minor_collect(); + stm_collect(0); char *p = _stm_nursery_current; char *end = p + size_rounded_up; diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -70,18 +70,19 @@ void stm_setup(void); void stm_teardown(void); +void stm_collect(long level); inline static void stm_start_inevitable_transaction(stm_thread_local_t *tl) { if (pthread_mutex_lock(&_stm_gil) != 0) abort(); _stm_tloc = tl; } -void do_minor_collect(void); inline static void stm_commit_transaction(void) { - do_minor_collect(); + stm_collect(0); _stm_tloc = NULL; if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); } inline static void stm_become_inevitable(const char *msg) { } +static inline int stm_is_inevitable(void) { return 1; } inline static void stm_read(object_t *ob) { } void _stm_write_slowpath(object_t *); @@ -92,6 +93,7 @@ } inline static char *_stm_real_address(object_t *ob) { return (char *)ob; } +static inline void stm_safe_point(void) { } #define STM_START_TRANSACTION(tl, here) do { \ (void)&(here); \ @@ -105,8 +107,26 @@ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); -inline static object_t *stm_setup_prebuilt(object_t *preb) -{ +inline static object_t *stm_setup_prebuilt(object_t *preb) { preb->gil_flags |= _STM_GCFLAG_WRITE_BARRIER; return preb; } +inline static object_t *stm_setup_prebuilt_weakref(object_t *preb) { + return stm_setup_prebuilt(preb); +} + +inline static long stm_identityhash(object_t *obj) { + return (long)obj; // XXX fails after a minor collection +} +inline static long stm_id(object_t *obj) { + return (long)obj; +} +inline static void stm_set_prebuilt_identityhash(object_t *obj, long hash) { + // XXX ignored +} +long stm_can_move(object_t *); + +inline static void stm_call_on_abort(stm_thread_local_t *tl, void *key, + void callback(void *)) { + // XXX ignored +} From noreply at buildbot.pypy.org Fri Mar 14 08:02:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:02:47 +0100 (CET) 
Subject: [pypy-commit] stmgc default: Name this too Message-ID: <20140314070247.8ECE51C11A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1001:3e4c91bfbdaa Date: 2014-03-14 08:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/3e4c91bfbdaa/ Log: Name this too diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -15,7 +15,7 @@ uint32_t gil_flags; } object_t; -typedef struct { +typedef struct stm_thread_local_s { object_t **shadowstack; object_t **shadowstack_base; object_t *thread_local_obj; From noreply at buildbot.pypy.org Fri Mar 14 08:05:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:05:23 +0100 (CET) Subject: [pypy-commit] stmgc default: Add the usual ifndef Message-ID: <20140314070523.4395D1C11A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1002:f9a5b96d5e07 Date: 2014-03-14 08:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/f9a5b96d5e07/ Log: Add the usual ifndef diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -1,3 +1,6 @@ +#ifndef _STMGC_H +#define _STMGC_H + #include #include #include @@ -130,3 +133,5 @@ void callback(void *)) { // XXX ignored } + +#endif From noreply at buildbot.pypy.org Fri Mar 14 08:07:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:07:13 +0100 (CET) Subject: [pypy-commit] stmgc default: Add this field Message-ID: <20140314070713.5C9031C11A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1003:c6069a290caa Date: 2014-03-14 08:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/c6069a290caa/ Log: Add this field diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -22,6 +22,7 @@ object_t **shadowstack; object_t **shadowstack_base; object_t *thread_local_obj; + long last_abort__bytes_in_nursery; } stm_thread_local_t; extern stm_thread_local_t *_stm_tloc; @@ -64,6 +65,7 @@ tl->shadowstack_base = (object_t **)malloc(768*1024); assert(tl->shadowstack_base); tl->shadowstack = tl->shadowstack_base; + tl->last_abort__bytes_in_nursery = 0; } inline static void stm_unregister_thread_local(stm_thread_local_t *tl) { free(tl->shadowstack_base); From noreply at buildbot.pypy.org Fri Mar 14 08:08:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:08:43 +0100 (CET) Subject: [pypy-commit] stmgc default: Rename this internal field, accessed by stmgcintf.c Message-ID: <20140314070843.8D73E1C11A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1004:a659d2f6d508 Date: 2014-03-14 08:08 +0100 http://bitbucket.org/pypy/stmgc/changeset/a659d2f6d508/ Log: Rename this internal field, accessed by stmgcintf.c diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c --- a/gil-c7/stmgc.c +++ b/gil-c7/stmgc.c @@ -157,13 +157,13 @@ #define NB_NURSERY_PAGES 1024 // 4MB #define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) -char *_stm_nursery_base = NULL; +char *_stm_nursery_start = NULL; char *_stm_nursery_current = NULL; char *_stm_nursery_end = NULL; static bool _is_in_nursery(object_t *obj) { - return ((char *)obj >= _stm_nursery_base && + return ((char *)obj >= _stm_nursery_start && (char *)obj < _stm_nursery_end); } @@ -259,14 +259,14 @@ static void throw_away_nursery(void) { - if (_stm_nursery_base == NULL) { - _stm_nursery_base = malloc(NURSERY_SIZE); - assert(_stm_nursery_base); - _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; + if (_stm_nursery_start == NULL) { + _stm_nursery_start = malloc(NURSERY_SIZE); + 
assert(_stm_nursery_start); + _stm_nursery_end = _stm_nursery_start + NURSERY_SIZE; } - _stm_nursery_current = _stm_nursery_base; - memset(_stm_nursery_base, 0, NURSERY_SIZE); + _stm_nursery_current = _stm_nursery_start; + memset(_stm_nursery_start, 0, NURSERY_SIZE); } void stm_collect(long level) From noreply at buildbot.pypy.org Fri Mar 14 08:13:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:13:08 +0100 (CET) Subject: [pypy-commit] stmgc default: Backed out changeset a659d2f6d508 Message-ID: <20140314071308.D93B11C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1005:0a1d4ca5a3c8 Date: 2014-03-14 08:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/0a1d4ca5a3c8/ Log: Backed out changeset a659d2f6d508 diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c --- a/gil-c7/stmgc.c +++ b/gil-c7/stmgc.c @@ -157,13 +157,13 @@ #define NB_NURSERY_PAGES 1024 // 4MB #define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) -char *_stm_nursery_start = NULL; +char *_stm_nursery_base = NULL; char *_stm_nursery_current = NULL; char *_stm_nursery_end = NULL; static bool _is_in_nursery(object_t *obj) { - return ((char *)obj >= _stm_nursery_start && + return ((char *)obj >= _stm_nursery_base && (char *)obj < _stm_nursery_end); } @@ -259,14 +259,14 @@ static void throw_away_nursery(void) { - if (_stm_nursery_start == NULL) { - _stm_nursery_start = malloc(NURSERY_SIZE); - assert(_stm_nursery_start); - _stm_nursery_end = _stm_nursery_start + NURSERY_SIZE; + if (_stm_nursery_base == NULL) { + _stm_nursery_base = malloc(NURSERY_SIZE); + assert(_stm_nursery_base); + _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; } - _stm_nursery_current = _stm_nursery_start; - memset(_stm_nursery_start, 0, NURSERY_SIZE); + _stm_nursery_current = _stm_nursery_base; + memset(_stm_nursery_base, 0, NURSERY_SIZE); } void stm_collect(long level) From noreply at buildbot.pypy.org Fri Mar 14 08:13:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:13:09 +0100 (CET) Subject: [pypy-commit] stmgc default: More compatibility Message-ID: <20140314071309.EEC8D1C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1006:f0a5e8de6637 Date: 2014-03-14 08:13 +0100 http://bitbucket.org/pypy/stmgc/changeset/f0a5e8de6637/ Log: More compatibility diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c --- a/gil-c7/stmgc.c +++ b/gil-c7/stmgc.c @@ -4,6 +4,7 @@ pthread_mutex_t _stm_gil = PTHREAD_MUTEX_INITIALIZER; stm_thread_local_t *_stm_tloc; +struct stm_segment_info_s _stm_segment; /************************************************************/ @@ -160,6 +161,7 @@ char *_stm_nursery_base = NULL; char *_stm_nursery_current = NULL; char *_stm_nursery_end = NULL; +#define _stm_nursery_start ((uintptr_t)_stm_nursery_base) static bool _is_in_nursery(object_t *obj) { diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -28,6 +28,10 @@ extern stm_thread_local_t *_stm_tloc; extern char *_stm_nursery_current, *_stm_nursery_end; +struct stm_segment_info_s { stm_jmpbuf_t *jmpbuf_ptr; }; +extern struct stm_segment_info_s _stm_segment; +#define STM_SEGMENT (&_stm_segment) + #ifdef NDEBUG #define OPT_ASSERT(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) #else From noreply at buildbot.pypy.org Fri Mar 14 08:14:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 08:14:42 +0100 (CET) Subject: [pypy-commit] stmgc default: Missing stuff Message-ID: <20140314071442.D816D1C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: Changeset: r1007:1a6ddd68127f Date: 2014-03-14 08:14 +0100 http://bitbucket.org/pypy/stmgc/changeset/1a6ddd68127f/ Log: Missing stuff diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -91,6 +91,7 @@ if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); } inline static void stm_become_inevitable(const char *msg) { } +inline static void _stm_become_inevitable(const char *msg) { } static inline int stm_is_inevitable(void) { return 1; } inline static void stm_read(object_t *ob) { } @@ -111,6 +112,7 @@ #define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) +#define STM_POP_ROOT_RET(tl) (*(--(tl).shadowstack)) extern ssize_t stmcb_size_rounded_up(struct object_s *); From noreply at buildbot.pypy.org Fri Mar 14 09:13:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 09:13:09 +0100 (CET) Subject: [pypy-commit] stmgc default: Weakrefs, copied from c7/ Message-ID: <20140314081309.1B9421C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1008:c459d0dc116d Date: 2014-03-14 09:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/c459d0dc116d/ Log: Weakrefs, copied from c7/ diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c --- a/gil-c7/stmgc.c +++ b/gil-c7/stmgc.c @@ -117,10 +117,12 @@ #define GCFLAG_WRITE_BARRIER _STM_GCFLAG_WRITE_BARRIER static struct list_s *objects_pointing_to_nursery; +static struct list_s *young_weakrefs; void stm_setup(void) { objects_pointing_to_nursery = list_create(); + young_weakrefs = list_create(); } void stm_teardown(void) @@ -271,11 +273,58 @@ memset(_stm_nursery_base, 0, NURSERY_SIZE); } +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((char *)(wr)) + (sz) - sizeof(void*))) + +static void move_young_weakrefs(void) +{ + LIST_FOREACH_R( + young_weakrefs, + object_t * /*item*/, + ({ + assert(_is_in_nursery(item)); + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + + /* the following checks are done like in nursery.c: */ + if (pforwarded_array[0] != GCWORD_MOVED) { + /* weakref dies */ + continue; + } + + item = pforwarded_array[1]; /* moved location */ + + assert(!_is_in_nursery(item)); + + ssize_t size = 16; + object_t *pointing_to = *WEAKREF_PTR(item, size); + assert(pointing_to != NULL); + + if (_is_in_nursery(pointing_to)) { + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; + /* the following checks are done like in nursery.c: */ + if (pforwarded_array[0] != GCWORD_MOVED) { + /* pointing_to dies */ + *WEAKREF_PTR(item, size) = NULL; + continue; /* no need to remember in old_weakrefs */ + } + else { + /* moved location */ + *WEAKREF_PTR(item, size) = pforwarded_array[1]; + } + } + else { + /* pointing_to was already old */ + } + //LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); + })); + list_clear(young_weakrefs); +} + void stm_collect(long level) { /* 'level' is ignored, only minor collections are implemented */ collect_roots_in_nursery(); collect_oldrefs_to_nursery(); + move_young_weakrefs(); throw_away_nursery(); } @@ -291,3 +340,11 @@ _stm_nursery_current = end; return (object_t *)p; } + +object_t *stm_allocate_weakref(ssize_t size_rounded_up) +{ + assert(size_rounded_up == 16); + object_t *obj = stm_allocate(size_rounded_up); + LIST_APPEND(young_weakrefs, obj); + return obj; +} diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -47,6 +47,7 @@ object_t *_stm_allocate_external(ssize_t); object_t 
*_stm_allocate_slowpath(ssize_t); +object_t *stm_allocate_weakref(ssize_t size_rounded_up); inline static object_t *stm_allocate(ssize_t size_rounded_up) { OPT_ASSERT(size_rounded_up >= 16); From noreply at buildbot.pypy.org Fri Mar 14 09:16:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 09:16:28 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix Message-ID: <20140314081628.1C3681C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1009:85aa3bce7a2d Date: 2014-03-14 09:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/85aa3bce7a2d/ Log: Fix diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -120,7 +120,8 @@ extern void stmcb_trace(struct object_s *, void (object_t **)); inline static object_t *stm_setup_prebuilt(object_t *preb) { - preb->gil_flags |= _STM_GCFLAG_WRITE_BARRIER; + if (preb != NULL) + preb->gil_flags |= _STM_GCFLAG_WRITE_BARRIER; return preb; } inline static object_t *stm_setup_prebuilt_weakref(object_t *preb) { From noreply at buildbot.pypy.org Fri Mar 14 09:28:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 09:28:29 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: detail Message-ID: <20140314082829.609EC1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69949:2d525f862189 Date: 2014-03-14 09:27 +0100 http://bitbucket.org/pypy/pypy/changeset/2d525f862189/ Log: detail diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -52,7 +52,7 @@ (in which case pypy_stm_nursery_low_fill_mark is set to 0) */ uintptr_t current = (uintptr_t)STM_SEGMENT->nursery_current; - return current >= pypy_stm_nursery_low_fill_mark; + return current > pypy_stm_nursery_low_fill_mark; } From noreply at buildbot.pypy.org Fri Mar 14 11:18:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 11:18:11 +0100 (CET) Subject: [pypy-commit] stmgc default: Give up with the constrain that tree_insert() only accepts keys Message-ID: <20140314101811.AA1F61C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1010:48150a83b44a Date: 2014-03-14 11:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/48150a83b44a/ Log: Give up with the constrain that tree_insert() only accepts keys aligned to multiples of 8. 
diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -75,7 +75,7 @@ static wlog_t *_tree_find(char *entry, uintptr_t addr) { - uintptr_t key = addr; + uintptr_t key = TREE_HASH(addr); while (((long)entry) & 1) { /* points to a further level */ key >>= TREE_BITS; @@ -122,10 +122,9 @@ static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val) { assert(addr != 0); /* the NULL key is reserved */ - assert(!(addr & (sizeof(void *) - 1))); /* the key must be aligned */ retry:; wlog_t *wlog; - uintptr_t key = addr; + uintptr_t key = TREE_HASH(addr); int shift = 0; char *p = (char *)(tree->toplevel.items); char *entry; @@ -155,7 +154,7 @@ _tree_grab(tree, sizeof(wlog_node_t)); if (node == NULL) goto retry; _tree_clear_node(node); - uintptr_t key1 = wlog1->addr; + uintptr_t key1 = TREE_HASH(wlog1->addr); char *p1 = (char *)(node->items); *(wlog_t **)(p1 + ((key1 >> shift) & TREE_MASK)) = wlog1; *(char **)p = ((char *)node) + 1; diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -82,17 +82,19 @@ supporting very high performance in TREE_FIND in the common case where there are no or few elements in the tree, but scaling correctly if the number of items becomes large (logarithmically, rather - than almost-constant-time with hash maps, but with low constants). */ + than almost-constant-time with hash maps, but with low constants). + The value 0 cannot be used as a key. +*/ #define TREE_BITS 4 #define TREE_ARITY (1 << TREE_BITS) -#define TREE_DEPTH_MAX ((sizeof(void*)*8 - 2 + TREE_BITS-1) / TREE_BITS) -/* sizeof(void*) = total number of bits - 2 = bits that we ignore anyway (2 or 3, conservatively 2) +#define TREE_DEPTH_MAX ((sizeof(void*)*8 + TREE_BITS-1) / TREE_BITS) +/* sizeof(void*)*8 = total number of bits (x + TREE_BITS-1) / TREE_BITS = divide by TREE_BITS, rounding up */ +#define TREE_HASH(key) ((key) ^ ((key) << 4)) #define TREE_MASK ((TREE_ARITY - 1) * sizeof(void*)) typedef struct { @@ -174,7 +176,7 @@ #define TREE_FIND(tree, addr1, result, goto_not_found) \ { \ - uintptr_t _key = (addr1); \ + uintptr_t _key = TREE_HASH(addr1); \ char *_p = (char *)((tree).toplevel.items); \ char *_entry = *(char **)(_p + (_key & TREE_MASK)); \ if (_entry == NULL) \ diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -16,13 +16,11 @@ return; /* If the object was already moved, it is stored in 'tree_prebuilt_objs'. - For now we use this dictionary, with keys being equal to the double - of the numeric address of the prebuilt object. We double them in - order to support addresses that are only 4-byte-aligned in the - static data. + For now we use this dictionary, with keys being equal to the numeric + address of the prebuilt object. 
*/ wlog_t *item; - TREE_FIND(*tree_prebuilt_objs, 2 * (uintptr_t)obj, item, goto not_found); + TREE_FIND(*tree_prebuilt_objs, (uintptr_t)obj, item, goto not_found); *pstaticobj_invalid = (object_t *)item->val; /* already moved */ return; @@ -42,7 +40,7 @@ nobj->stm_flags = GCFLAG_WRITE_BARRIER; /* Add the object to the tree */ - tree_insert(tree_prebuilt_objs, 2 * (uintptr_t)obj, (uintptr_t)nobj); + tree_insert(tree_prebuilt_objs, (uintptr_t)obj, (uintptr_t)nobj); /* Done */ *pstaticobj_invalid = nobj; diff --git a/c7/test/test_list.py b/c7/test/test_list.py --- a/c7/test/test_list.py +++ b/c7/test/test_list.py @@ -65,34 +65,34 @@ def test_tree_add(): t = lib.tree_create() - lib.tree_insert(t, 23000, 456) - for i in range(0, 100000, 1000): - assert lib.tree_contains(t, i) == (i == 23000) + lib.tree_insert(t, 23, 456) + for i in range(0, 100): + assert lib.tree_contains(t, i) == (i == 23) lib.tree_free(t) def test_tree_is_cleared(): t = lib.tree_create() assert lib.tree_is_cleared(t) - lib.tree_insert(t, 23000, 456) + lib.tree_insert(t, 23, 456) assert not lib.tree_is_cleared(t) lib.tree_free(t) def test_tree_delete_item(): t = lib.tree_create() - lib.tree_insert(t, 23000, 456) - lib.tree_insert(t, 42000, 34289) + lib.tree_insert(t, 23, 456) + lib.tree_insert(t, 42, 34289) assert not lib.tree_is_cleared(t) - assert lib.tree_contains(t, 23000) - res = lib.tree_delete_item(t, 23000) + assert lib.tree_contains(t, 23) + res = lib.tree_delete_item(t, 23) assert res - assert not lib.tree_contains(t, 23000) - res = lib.tree_delete_item(t, 23000) + assert not lib.tree_contains(t, 23) + res = lib.tree_delete_item(t, 23) assert not res - res = lib.tree_delete_item(t, 21000) + res = lib.tree_delete_item(t, 21) assert not res assert not lib.tree_is_cleared(t) - assert lib.tree_contains(t, 42000) - res = lib.tree_delete_item(t, 42000) + assert lib.tree_contains(t, 42) + res = lib.tree_delete_item(t, 42) assert res assert not lib.tree_is_cleared(t) # not cleared, but still empty for i in range(100): @@ -101,18 +101,18 @@ def test_tree_walk(): t = lib.tree_create() - lib.tree_insert(t, 23000, 456) - lib.tree_insert(t, 42000, 34289) + lib.tree_insert(t, 23, 456) + lib.tree_insert(t, 42, 34289) a = ffi.new("uintptr_t[10]") res = lib.test_tree_walk(t, a) assert res == 2 - assert ((a[0] == 23000 and a[1] == 42000) or - (a[0] == 42000 and a[1] == 23000)) + assert ((a[0] == 23 and a[1] == 42) or + (a[0] == 42 and a[1] == 23)) lib.tree_free(t) def test_tree_walk_big(): t = lib.tree_create() - values = random.sample(xrange(0, 1000000, 8), 300) + values = random.sample(xrange(1, 100000), 300) for x in values: lib.tree_insert(t, x, x) a = ffi.new("uintptr_t[1000]") @@ -123,3 +123,7 @@ found.add(a[i]) assert found == set(values) lib.tree_free(t) + +def test_hash_permutation(): + hashes = [((n ^ (n << 4)) & 0xFF0) for n in range(256)] + assert set(hashes) == set(range(0, 4096, 16)) From noreply at buildbot.pypy.org Fri Mar 14 11:22:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 11:22:34 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix Message-ID: <20140314102234.100471C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1011:3c39b8d8e184 Date: 2014-03-14 11:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/3c39b8d8e184/ Log: Fix diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -28,7 +28,10 @@ extern stm_thread_local_t *_stm_tloc; extern char *_stm_nursery_current, *_stm_nursery_end; -struct stm_segment_info_s { stm_jmpbuf_t 
*jmpbuf_ptr; }; +struct stm_segment_info_s { + stm_jmpbuf_t *jmpbuf_ptr; /* compat only -- always NULL */ + char *nursery_current; /* compat only -- always NULL */ +}; extern struct stm_segment_info_s _stm_segment; #define STM_SEGMENT (&_stm_segment) From noreply at buildbot.pypy.org Fri Mar 14 11:26:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 11:26:19 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/3c39b8d8e184 Message-ID: <20140314102619.C8B421C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69950:6ec806be961b Date: 2014-03-14 11:25 +0100 http://bitbucket.org/pypy/pypy/changeset/6ec806be961b/ Log: import stmgc/3c39b8d8e184 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -c4e8d6220b74 +3c39b8d8e184 diff --git a/rpython/translator/stm/src_stm/stm/list.c b/rpython/translator/stm/src_stm/stm/list.c --- a/rpython/translator/stm/src_stm/stm/list.c +++ b/rpython/translator/stm/src_stm/stm/list.c @@ -76,7 +76,7 @@ static wlog_t *_tree_find(char *entry, uintptr_t addr) { - uintptr_t key = addr; + uintptr_t key = TREE_HASH(addr); while (((long)entry) & 1) { /* points to a further level */ key >>= TREE_BITS; @@ -123,10 +123,9 @@ static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val) { assert(addr != 0); /* the NULL key is reserved */ - assert(!(addr & (sizeof(void *) - 1))); /* the key must be aligned */ retry:; wlog_t *wlog; - uintptr_t key = addr; + uintptr_t key = TREE_HASH(addr); int shift = 0; char *p = (char *)(tree->toplevel.items); char *entry; @@ -156,7 +155,7 @@ _tree_grab(tree, sizeof(wlog_node_t)); if (node == NULL) goto retry; _tree_clear_node(node); - uintptr_t key1 = wlog1->addr; + uintptr_t key1 = TREE_HASH(wlog1->addr); char *p1 = (char *)(node->items); *(wlog_t **)(p1 + ((key1 >> shift) & TREE_MASK)) = wlog1; *(char **)p = ((char *)node) + 1; diff --git a/rpython/translator/stm/src_stm/stm/list.h b/rpython/translator/stm/src_stm/stm/list.h --- a/rpython/translator/stm/src_stm/stm/list.h +++ b/rpython/translator/stm/src_stm/stm/list.h @@ -83,17 +83,19 @@ supporting very high performance in TREE_FIND in the common case where there are no or few elements in the tree, but scaling correctly if the number of items becomes large (logarithmically, rather - than almost-constant-time with hash maps, but with low constants). */ + than almost-constant-time with hash maps, but with low constants). + The value 0 cannot be used as a key. 
+*/ #define TREE_BITS 4 #define TREE_ARITY (1 << TREE_BITS) -#define TREE_DEPTH_MAX ((sizeof(void*)*8 - 2 + TREE_BITS-1) / TREE_BITS) -/* sizeof(void*) = total number of bits - 2 = bits that we ignore anyway (2 or 3, conservatively 2) +#define TREE_DEPTH_MAX ((sizeof(void*)*8 + TREE_BITS-1) / TREE_BITS) +/* sizeof(void*)*8 = total number of bits (x + TREE_BITS-1) / TREE_BITS = divide by TREE_BITS, rounding up */ +#define TREE_HASH(key) ((key) ^ ((key) << 4)) #define TREE_MASK ((TREE_ARITY - 1) * sizeof(void*)) typedef struct { @@ -175,7 +177,7 @@ #define TREE_FIND(tree, addr1, result, goto_not_found) \ { \ - uintptr_t _key = (addr1); \ + uintptr_t _key = TREE_HASH(addr1); \ char *_p = (char *)((tree).toplevel.items); \ char *_entry = *(char **)(_p + (_key & TREE_MASK)); \ if (_entry == NULL) \ diff --git a/rpython/translator/stm/src_stm/stm/prebuilt.c b/rpython/translator/stm/src_stm/stm/prebuilt.c --- a/rpython/translator/stm/src_stm/stm/prebuilt.c +++ b/rpython/translator/stm/src_stm/stm/prebuilt.c @@ -17,13 +17,11 @@ return; /* If the object was already moved, it is stored in 'tree_prebuilt_objs'. - For now we use this dictionary, with keys being equal to the double - of the numeric address of the prebuilt object. We double them in - order to support addresses that are only 4-byte-aligned in the - static data. + For now we use this dictionary, with keys being equal to the numeric + address of the prebuilt object. */ wlog_t *item; - TREE_FIND(*tree_prebuilt_objs, 2 * (uintptr_t)obj, item, goto not_found); + TREE_FIND(*tree_prebuilt_objs, (uintptr_t)obj, item, goto not_found); *pstaticobj_invalid = (object_t *)item->val; /* already moved */ return; @@ -43,7 +41,7 @@ nobj->stm_flags = GCFLAG_WRITE_BARRIER; /* Add the object to the tree */ - tree_insert(tree_prebuilt_objs, 2 * (uintptr_t)obj, (uintptr_t)nobj); + tree_insert(tree_prebuilt_objs, (uintptr_t)obj, (uintptr_t)nobj); /* Done */ *pstaticobj_invalid = nobj; From noreply at buildbot.pypy.org Fri Mar 14 14:28:32 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 14 Mar 2014 14:28:32 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: reset atomic on abort Message-ID: <20140314132832.2D6201D2517@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r69951:d88c0ffdda9f Date: 2014-03-14 14:29 +0100 http://bitbucket.org/pypy/pypy/changeset/d88c0ffdda9f/ Log: reset atomic on abort diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -106,6 +106,7 @@ if (pypy_stm_ready_atomic == 1) { stm_commit_transaction(); STM_START_TRANSACTION(&stm_thread_local, jmpbuf); + pypy_stm_ready_atomic = 1; /* reset after abort */ } /* After setjmp(), the local variables v_* are preserved because they From noreply at buildbot.pypy.org Fri Mar 14 14:40:00 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 14 Mar 2014 14:40:00 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: turn inevitable in commit_if_not_atomic Message-ID: <20140314134000.72C001C0124@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r69952:135462a4e323 Date: 2014-03-14 14:40 +0100 http://bitbucket.org/pypy/pypy/changeset/135462a4e323/ Log: turn inevitable in commit_if_not_atomic diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h 
@@ -16,11 +16,14 @@ void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ static inline void pypy_stm_commit_if_not_atomic(void) { + int e = errno; if (pypy_stm_ready_atomic == 1) { - int e = errno; stm_commit_transaction(); - errno = e; } + else { + _stm_become_inevitable("commit_if_not_atomic in atomic"); + } + errno = e; } static inline void pypy_stm_start_inevitable_if_not_atomic(void) { if (pypy_stm_ready_atomic == 1) { From noreply at buildbot.pypy.org Fri Mar 14 14:43:02 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 14 Mar 2014 14:43:02 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140314134302.AB1E11C0124@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r69953:1f5ecae33a88 Date: 2014-03-14 14:43 +0100 http://bitbucket.org/pypy/pypy/changeset/1f5ecae33a88/ Log: fix diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -21,7 +21,7 @@ stm_commit_transaction(); } else { - _stm_become_inevitable("commit_if_not_atomic in atomic"); + stm_become_inevitable("commit_if_not_atomic in atomic"); } errno = e; } From noreply at buildbot.pypy.org Fri Mar 14 14:49:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 14:49:48 +0100 (CET) Subject: [pypy-commit] stmgc default: Add an assert Message-ID: <20140314134948.708AE1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1012:ce38b8d4a1f2 Date: 2014-03-14 14:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/ce38b8d4a1f2/ Log: Add an assert diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -301,6 +301,7 @@ static void enter_safe_point_if_requested(void) { + assert(_running_transaction()); assert(_has_mutex()); while (1) { if (must_abort()) From noreply at buildbot.pypy.org Fri Mar 14 14:51:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 14:51:59 +0100 (CET) Subject: [pypy-commit] stmgc default: Rename this function to make it clear that it's not giving Message-ID: <20140314135159.A4EC31C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1013:f0caf6780ab6 Date: 2014-03-14 14:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/f0caf6780ab6/ Log: Rename this function to make it clear that it's not giving a very reliable answer in case the real answer is No diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -11,7 +11,7 @@ void _stm_write_slowpath(object_t *obj) { - assert(_running_transaction()); + assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); /* is this an object from the same transaction, outside the nursery? 
*/ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -204,7 +204,7 @@ } static bool _is_tl_registered(stm_thread_local_t *tl); -static bool _running_transaction(void); +static bool _seems_to_be_running_transaction(void); static void teardown_core(void); static void abort_with_mutex(void) __attribute__((noreturn)); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -213,7 +213,7 @@ } __attribute__((unused)) -static bool _running_transaction(void) +static bool _seems_to_be_running_transaction(void) { return (STM_SEGMENT->running_thread != NULL); } @@ -301,7 +301,7 @@ static void enter_safe_point_if_requested(void) { - assert(_running_transaction()); + assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { if (must_abort()) From noreply at buildbot.pypy.org Fri Mar 14 15:15:41 2014 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 14 Mar 2014 15:15:41 +0100 (CET) Subject: [pypy-commit] pypy default: failing test Message-ID: <20140314141541.5D4B91C06AD@cobra.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r69954:f51b42dd2ed4 Date: 2014-03-14 15:14 +0100 http://bitbucket.org/pypy/pypy/changeset/f51b42dd2ed4/ Log: failing test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5433,6 +5433,23 @@ jump(i0) """ self.optimize_loop(ops, expected) + + def test_hippyvm_unroll_bug(self): + ops = """ + [p0, i1, i2] + i3 = int_add(i1, 1) + i4 = int_eq(i3, i2) + setfield_gc(p0, i4, descr=valuedescr) + jump(p0, i3, i2) + """ + expected = """ + [p0, i1, i2] + i3 = int_add(i1, 1) + i4 = int_eq(i3, i2) + setfield_gc(p0, i4, descr=valuedescr) + jump(p0, i3, i2) + """ + self.optimize_loop(ops, expected) class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8253,6 +8253,23 @@ """ self.optimize_loop(ops, expected) + def test_hippyvm_unroll_bug(self): + ops = """ + [p0, i1, i2] + i3 = int_add(i1, 1) + i4 = int_eq(i3, i2) + setfield_gc(p0, i4, descr=valuedescr) + jump(p0, i3, i2) + """ + expected = """ + [p0, i1, i2] + i3 = int_add(i1, 1) + i4 = int_eq(i3, i2) + setfield_gc(p0, i4, descr=valuedescr) + jump(p0, i3, i2) + """ + self.optimize_loop(ops, expected) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Fri Mar 14 15:57:33 2014 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 14 Mar 2014 15:57:33 +0100 (CET) Subject: [pypy-commit] pypy default: simplified tests and renamed the basic case to something more apropriate Message-ID: <20140314145733.B69BC1D27BD@cobra.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r69955:74e8e981ffa1 Date: 2014-03-14 15:56 +0100 http://bitbucket.org/pypy/pypy/changeset/74e8e981ffa1/ Log: simplified tests and renamed the basic case to something more apropriate diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1660,6 +1660,16 @@ """ self.optimize_loop(ops, ops) + def test_setfield_int_eq_result(self): + # test that the setfield_gc does not end up before int_eq + ops = """ + [p1, i1, i2] + i3 = int_eq(i1, i2) + setfield_gc(p1, i3, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, ops) + def test_duplicate_setfield_aliasing(self): # a case where aliasing issues (and not enough cleverness) mean # that we fail to remove any setfield_gc @@ -5434,23 +5444,5 @@ """ self.optimize_loop(ops, expected) - def test_hippyvm_unroll_bug(self): - ops = """ - [p0, i1, i2] - i3 = int_add(i1, 1) - i4 = int_eq(i3, i2) - setfield_gc(p0, i4, descr=valuedescr) - jump(p0, i3, i2) - """ - expected = """ - [p0, i1, i2] - i3 = int_add(i1, 1) - i4 = int_eq(i3, i2) - setfield_gc(p0, i4, descr=valuedescr) - jump(p0, i3, i2) - """ - self.optimize_loop(ops, expected) - - class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8261,14 +8261,7 @@ setfield_gc(p0, i4, descr=valuedescr) jump(p0, i3, i2) """ - expected = """ - [p0, i1, i2] - i3 = int_add(i1, 1) - i4 = int_eq(i3, i2) - setfield_gc(p0, i4, descr=valuedescr) - jump(p0, i3, i2) - """ - self.optimize_loop(ops, expected) + self.optimize_loop(ops, ops) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Fri Mar 14 16:09:09 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Mar 2014 16:09:09 +0100 (CET) Subject: [pypy-commit] pypy default: use EOF instead of a response file Message-ID: <20140314150909.EEECA1C0124@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69956:0a027b95e015 Date: 2014-03-14 15:52 +0200 http://bitbucket.org/pypy/pypy/changeset/0a027b95e015/ Log: use EOF instead of a response file diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -369,32 +369,21 @@ for rule in rules: m.rule(*rule) - objects = ' $(OBJECTS)' - create_obj_response_file = [] - if len(' '.join(rel_ofiles)) > 4000: - # cmd.exe has a limit of ~4000 characters before a command line is too long. - # Use a response file instead, at the cost of making the Makefile very ugly. 
- for i in range(len(rel_ofiles) - 1): - create_obj_response_file.append('echo %s >> obj_names.rsp' % \ - rel_ofiles[i]) - # use cmd /c for the last one so that the file is flushed - create_obj_response_file.append('cmd /c echo %s >> obj_names.rsp' % \ - rel_ofiles[-1]) - objects = ' @obj_names.rsp' if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ + ' $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) else: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST /MANIFESTFILE:$*.manifest', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST' + \ + ' /MANIFESTFILE:$*.manifest @<<\n$(OBJECTS)\n<<', 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) if shared: From noreply at buildbot.pypy.org Fri Mar 14 16:09:11 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Mar 2014 16:09:11 +0100 (CET) Subject: [pypy-commit] pypy default: fix link error on untranslated win32 tests, only to discover tests hang on rffi cast in from_ref() in pyobject Message-ID: <20140314150911.2B0BA1C0124@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69957:9ec1c621f45c Date: 2014-03-14 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/9ec1c621f45c/ Log: fix link error on untranslated win32 tests, only to discover tests hang on rffi cast in from_ref() in pyobject diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,8 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # prevent linking with python27.lib + kwds["compile_extra"].append("/DPy_BUILD_CORE") elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: From noreply at buildbot.pypy.org Fri Mar 14 16:29:18 2014 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 14 Mar 2014 16:29:18 +0100 (CET) Subject: [pypy-commit] pypy default: fix for tests in 74e8e981ffa1 and f51b42dd2ed4 Message-ID: <20140314152918.C73951C0124@cobra.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r69958:40052bc7329e Date: 2014-03-14 16:27 +0100 http://bitbucket.org/pypy/pypy/changeset/40052bc7329e/ Log: fix for tests in 74e8e981ffa1 and f51b42dd2ed4 diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -93,6 +93,11 @@ # possible aliasing). 
self.clear() self._lazy_setfield = None + if optheap.postponed_op: + for a in op.getarglist(): + if a is optheap.postponed_op.result: + optheap.emit_postponed_op() + break optheap.next_optimization.propagate_forward(op) if not can_cache: return @@ -179,6 +184,9 @@ def flush(self): self.force_all_lazy_setfields_and_arrayitems() + self.emit_postponed_op() + + def emit_postponed_op(self): if self.postponed_op: postponed_op = self.postponed_op self.postponed_op = None @@ -227,10 +235,7 @@ def emit_operation(self, op): self.emitting_operation(op) - if self.postponed_op: - postponed_op = self.postponed_op - self.postponed_op = None - self.next_optimization.propagate_forward(postponed_op) + self.emit_postponed_op() if (op.is_comparison() or op.getopnum() == rop.CALL_MAY_FORCE or op.is_ovf()): self.postponed_op = op From noreply at buildbot.pypy.org Fri Mar 14 16:29:20 2014 From: noreply at buildbot.pypy.org (hakanardo) Date: Fri, 14 Mar 2014 16:29:20 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140314152920.0D5FE1C0124@cobra.cs.uni-duesseldorf.de> Author: Hakan Ardo Branch: Changeset: r69959:b54c3c50424f Date: 2014-03-14 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/b54c3c50424f/ Log: merge diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,8 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # prevent linking with python27.lib + kwds["compile_extra"].append("/DPy_BUILD_CORE") elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -369,32 +369,21 @@ for rule in rules: m.rule(*rule) - objects = ' $(OBJECTS)' - create_obj_response_file = [] - if len(' '.join(rel_ofiles)) > 4000: - # cmd.exe has a limit of ~4000 characters before a command line is too long. - # Use a response file instead, at the cost of making the Makefile very ugly. 
- for i in range(len(rel_ofiles) - 1): - create_obj_response_file.append('echo %s >> obj_names.rsp' % \ - rel_ofiles[i]) - # use cmd /c for the last one so that the file is flushed - create_obj_response_file.append('cmd /c echo %s >> obj_names.rsp' % \ - rel_ofiles[-1]) - objects = ' @obj_names.rsp' if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ + ' $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) else: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST /MANIFESTFILE:$*.manifest', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST' + \ + ' /MANIFESTFILE:$*.manifest @<<\n$(OBJECTS)\n<<', 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) if shared: From noreply at buildbot.pypy.org Fri Mar 14 16:57:23 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 14 Mar 2014 16:57:23 +0100 (CET) Subject: [pypy-commit] stmgc default: food for thought Message-ID: <20140314155723.1C1141C0124@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1014:fe2bab9ed4dd Date: 2014-03-14 16:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/fe2bab9ed4dd/ Log: food for thought diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -144,6 +144,48 @@ assert stm_get_weakref(lp1) == ffi.NULL +class TestIsolation(BaseTest): + def test_not_break(self): + lpold = stm_allocate_old_refs(1) + + self.start_transaction() + toref = stm_allocate(16) + weakref = stm_allocate_weakref(toref) + stm_set_ref(lpold, 0, weakref) + self.push_root(toref) + self.commit_transaction() + # toref survives because it's on our shadowstack + # weakref is in a prebuilt old object + + # get the new toref reference (old now) + toref = self.pop_root() + self.push_root(toref) + + self.switch(1) + # in another thread, we start a transaction + + self.start_transaction() + weakref = stm_get_ref(lpold, 0) + # everyone is still alive, so we still have toref in the weakref: + assert stm_get_weakref(weakref) == toref + + + self.switch(0) + + # back in thread 0, we pop toref from the shadowstack + # in an inevitable transaction + self.start_transaction() + stm_become_inevitable() + self.pop_root() # forget toref + stm_major_collect() + + self.switch(1) + + # toref should still be alive and stored in the + # weakref... + # should not be NULLed, otherwise this means we can + # communicate between the inevitable and this transaction! 
+ assert stm_get_weakref(weakref) == toref class TestMajorCollection(BaseTest): From noreply at buildbot.pypy.org Fri Mar 14 17:07:05 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 14 Mar 2014 17:07:05 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: don't add stm_read for immutable operations (still very bad) Message-ID: <20140314160705.D69C21C0124@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r69960:955bfd905138 Date: 2014-03-14 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/955bfd905138/ Log: don't add stm_read for immutable operations (still very bad) diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import SpaceOperation +from rpython.flowspace.model import SpaceOperation, Constant, Variable from rpython.translator.unsimplify import varoftype from rpython.rtyper.lltypesystem import lltype @@ -9,6 +9,31 @@ def is_gc_ptr(T): return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' +def unwraplist(list_v): + for v in list_v: + if isinstance(v, Constant): + yield v.value + elif isinstance(v, Variable): + yield None # unknown + else: + raise AssertionError(v) + +def is_immutable(op): + if op.opname in ('getfield', 'setfield'): + STRUCT = op.args[0].concretetype.TO + return STRUCT._immutable_field(op.args[1].value) + if op.opname in ('getarrayitem', 'setarrayitem'): + ARRAY = op.args[0].concretetype.TO + return ARRAY._immutable_field() + if op.opname == 'getinteriorfield': + OUTER = op.args[0].concretetype.TO + return OUTER._immutable_interiorfield(unwraplist(op.args[1:])) + if op.opname == 'setinteriorfield': + OUTER = op.args[0].concretetype.TO + return OUTER._immutable_interiorfield(unwraplist(op.args[1:-1])) + if op.opname in ('raw_load', 'raw_store'): + return False + def insert_stm_read_barrier(transformer, graph): # We need to put enough 'stm_read' in the graph so that any @@ -19,13 +44,29 @@ # XXX this can be optimized a lot, but for now we go with the # simplest possible solution... 
# + gcremovetypeptr = transformer.translator.config.translation.gcremovetypeptr + for block in graph.iterblocks(): if not block.operations: continue newops = [] stm_ignored = False for op in block.operations: - if op.opname in READ_OPS and is_gc_ptr(op.args[0].concretetype): + is_getter = (op.opname in READ_OPS and + op.result.concretetype is not lltype.Void and + is_gc_ptr(op.args[0].concretetype)) + + if (gcremovetypeptr and op.opname in ('getfield', 'setfield') and + op.args[1].value == 'typeptr' and + op.args[0].concretetype.TO._hints.get('typeptr')): + # typeptr is always immutable + pass + elif ((op.opname in ('getarraysize', 'getinteriorarraysize', 'weakref_deref') and + is_gc_ptr(op.args[0].concretetype)) or + (is_getter and is_immutable(op))): + # immutable getters + pass + elif is_getter: if not stm_ignored: v_none = varoftype(lltype.Void) newops.append(SpaceOperation('stm_read', From noreply at buildbot.pypy.org Fri Mar 14 17:40:50 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 14 Mar 2014 17:40:50 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: remove 'weakref_deref' from stm_read-exceptions Message-ID: <20140314164050.6F0331C0124@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r69961:71769371cb0b Date: 2014-03-14 17:41 +0100 http://bitbucket.org/pypy/pypy/changeset/71769371cb0b/ Log: remove 'weakref_deref' from stm_read-exceptions diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -61,10 +61,13 @@ op.args[0].concretetype.TO._hints.get('typeptr')): # typeptr is always immutable pass - elif ((op.opname in ('getarraysize', 'getinteriorarraysize', 'weakref_deref') and + elif ((op.opname in ('getarraysize', 'getinteriorarraysize') and is_gc_ptr(op.args[0].concretetype)) or (is_getter and is_immutable(op))): # immutable getters + # 'weakref_deref': kind of immutable, but the GC has to see + # which transactions read from a dying weakref, so we + # need the barrier nonetheless... pass elif is_getter: if not stm_ignored: From noreply at buildbot.pypy.org Fri Mar 14 18:42:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 18:42:41 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: Update to mention installing other modules (and NumPy) in the download page Message-ID: <20140314174241.1A03A1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r477:8537665bac0e Date: 2014-03-14 18:42 +0100 http://bitbucket.org/pypy/pypy.org/changeset/8537665bac0e/ Log: Update to mention installing other modules (and NumPy) in the download page diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -61,6 +61,8 @@
  • Installing (optional)
  • Installing more modules
  • Installing NumPy (optional)
  • Building from source
  • Packaging
  • Checksums

@@ -160,6 +162,32 @@ not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

Installing more modules

The recommended way is to install pip, which is the standard package manager of Python. It works like it does on CPython. One practical difference, though, is that it usually comes pre-packaged for you when you get CPython from a place like your Linux distribution. In the case of PyPy (or CPython if you download it from http://www.python.org/), you need to get it separately, as explained in our FAQ.

Installing NumPy

NumPy is an exception to the rule that most packages work without changes. The “numpy” module needs to be installed from our own repository rather than from the official source.

If you have pip (the command-line assumes that it finds the pip belonging to PyPy, not the one from CPython):

  pip install git+https://bitbucket.org/pypy/numpy.git

Alternatively, the direct way:

  git clone https://bitbucket.org/pypy/numpy.git
  cd numpy
  pypy setup.py install
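
A quick way to sanity-check the result (assuming the pypy on your PATH is the interpreter the module was just installed into, and that the fork exposes the usual numpy.__version__ attribute) is:

  pypy -c "import numpy; print(numpy.__version__)"

which should print a version string instead of raising ImportError.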

    Building from source

      diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -32,6 +32,8 @@ * `Other versions`_ * `Installing`_ (optional) + * `Installing more modules`_ + * `Installing NumPy`_ (optional) * `Building from source`_ * `Packaging`_ * `Checksums`_ @@ -182,6 +184,40 @@ a symlink to it, otherwise it will not find its libraries. +Installing more modules +------------------------------- + +The recommended way is to install ``pip``, which is the standard package +manager of Python. It works like it does on CPython. One practical +difference, though, is that it usually comes pre-packaged for you when +you get CPython from a place like your Linux distribution. In the case +of PyPy (or CPython if you download it from http://www.python.org/), +you need to get it separately, as explained `in our FAQ.`__ + +.. __: http://pypy.readthedocs.org/en/latest/faq.html#module-xyz-does-not-work-with-pypy-importerror + + +Installing NumPy +------------------------------- + +NumPy is an exception to the rule that most packages work without +changes. The "numpy" module needs to be installed from `our own +repository`__ rather than from the official source. + +.. __: https://bitbucket.org/pypy/numpy + +If you have pip (the command-line assumes that it finds the pip +belonging to PyPy, not the one from CPython):: + + pip install git+https://bitbucket.org/pypy/numpy.git + +Alternatively, the direct way:: + + git clone https://bitbucket.org/pypy/numpy.git + cd numpy + pypy setup.py install + + .. _translate: Building from source From noreply at buildbot.pypy.org Fri Mar 14 20:17:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 14 Mar 2014 20:17:11 +0100 (CET) Subject: [pypy-commit] pypy default: py3k compat: avoid cmp Message-ID: <20140314191711.8259E1C11A4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69962:66662b92489d Date: 2014-03-14 12:16 -0700 http://bitbucket.org/pypy/pypy/changeset/66662b92489d/ Log: py3k compat: avoid cmp diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -778,7 +778,7 @@ try: ret = callback(text1, text2) assert isinstance(ret, (int, long)) - return cmp(ret, 0) + return (ret > 0) - (ret < 0) except Exception: return 0 From noreply at buildbot.pypy.org Fri Mar 14 20:37:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 14 Mar 2014 20:37:53 +0100 (CET) Subject: [pypy-commit] pypy default: still use cmp builtin on py2, only apply manual case for py3 Message-ID: <20140314193753.35F781C3058@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69963:ddbc70485efb Date: 2014-03-14 15:36 -0400 http://bitbucket.org/pypy/pypy/changeset/ddbc70485efb/ Log: still use cmp builtin on py2, only apply manual case for py3 diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -38,6 +38,7 @@ if sys.version_info[0] >= 3: StandardError = Exception + cmp = lambda x, y: (x > y) - (x < y) long = int xrange = range basestring = unicode = str @@ -778,7 +779,7 @@ try: ret = callback(text1, text2) assert isinstance(ret, (int, long)) - return (ret > 0) - (ret < 0) + return cmp(ret, 0) except Exception: return 0 From noreply at buildbot.pypy.org Fri Mar 14 22:02:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 14 Mar 2014 22:02:27 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140314210227.9D27F1C06AD@cobra.cs.uni-duesseldorf.de> Author: 
Brian Kearns Branch: Changeset: r69964:d5b78e64b2f0 Date: 2014-03-14 16:58 -0400 http://bitbucket.org/pypy/pypy/changeset/d5b78e64b2f0/ Log: cleanup diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,14 +1,15 @@ import py from rpython.rlib.objectmodel import instantiate +from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt, TreeLoop +from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.optimizeopt import build_opt_chain from rpython.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, convert_old_style_to_targets) -from rpython.jit.metainterp.optimizeopt import build_opt_chain -from rpython.jit.metainterp.optimize import InvalidLoop -from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from rpython.jit.metainterp.history import TreeLoop -from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import \ + FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import rop, opname, oparity -from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData + def test_build_opt_chain(): def check(chain, expected_names): @@ -40,7 +41,6 @@ class BaseTestWithUnroll(BaseTest): - enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" def optimize_loop(self, ops, expected, expected_preamble=None, @@ -93,8 +93,8 @@ def raises(self, e, fn, *args): return py.test.raises(e, fn, *args).value + class OptimizeOptTest(BaseTestWithUnroll): - def setup_method(self, meth=None): class FailDescr(compile.ResumeGuardDescr): oparse = None @@ -130,7 +130,6 @@ self.namespace.pop('fdescr', None) self.namespace.pop('fdescr2', None) - def test_simple(self): ops = """ [] @@ -974,7 +973,6 @@ """ self.optimize_loop(ops, expected, preamble) - # ---------- def test_virtual_1(self): @@ -1252,7 +1250,6 @@ """ self.optimize_loop(ops, expected, preamble) - def test_virtual_constant_isnonnull(self): ops = """ [i0] @@ -2789,8 +2786,7 @@ p2 = new_with_vtable(ConstClass(node_vtable)) jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_2(self): ops = """ @@ -2801,8 +2797,7 @@ escape(p2) # prevent it from staying Virtual jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_3(self): ops = """ @@ -2824,8 +2819,7 @@ guard_value(p2, ConstPtr(myptr)) [] jump(p2) """ - exc = self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + exc = self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") if exc: assert "node" in exc.msg @@ -3151,7 +3145,6 @@ """ self.optimize_loop(ops, expected) - def test_int_and_or_with_zero(self): ops = """ [i0, i1] @@ -5107,7 +5100,6 @@ """ self.optimize_loop(ops, expected) - def test_division_nonneg(self): py.test.skip("harder") # this is how an app-level division turns into right now @@ -5444,7 +5436,6 @@ """ self.optimize_loop(ops, ops, ops) - def test_mul_ovf(self): ops = """ [i0, i1] @@ -5591,7 +5582,6 @@ def is_integer_bounded(self): return False - for n in ('inst_w_seq', 'inst_index', 'inst_w_list', 
'inst_length', 'inst_start', 'inst_step'): self.namespace[n] = FakeDescr(n) @@ -5847,7 +5837,7 @@ self.optimize_loop(ops, optops, preamble) # check with replacing 'str' with 'unicode' everywhere def r(s): - return s.replace('str','unicode').replace('s"', 'u"') + return s.replace('str', 'unicode').replace('s"', 'u"') self.optimize_loop(r(ops), r(optops), r(preamble)) def test_newstr_1(self): @@ -6277,7 +6267,7 @@ if isinstance(value, calldescrtype): extra = value.get_extra_info() if (extra and isinstance(extra, effectinfotype) and - extra.oopspecindex == oopspecindex): + extra.oopspecindex == oopspecindex): # returns 0 for 'func' in this test return value, 0 raise AssertionError("not found: oopspecindex=%d" % @@ -7395,7 +7385,6 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_loopinvariant_constant_strgetitem(self): ops = """ [p0] @@ -7454,7 +7443,7 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_propagate_virtual_arryalen(self): + def test_propagate_virtual_arraylen(self): ops = """ [p0] p404 = new_array(2, descr=arraydescr) @@ -7831,7 +7820,6 @@ """ self.optimize_loop(ops, expected) - def test_setarrayitem_followed_by_arraycopy(self): ops = """ [p1, p2] @@ -8124,7 +8112,6 @@ """ self.optimize_loop(ops, expected) - def test_issue1080_infinitie_loop_simple(self): ops = """ [p69] @@ -8149,8 +8136,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_licm_boxed_opaque_getitem(self): ops = """ @@ -8225,8 +8211,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_cond_call_with_a_constant(self): ops = """ @@ -8263,6 +8248,6 @@ """ self.optimize_loop(ops, ops) + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass - From noreply at buildbot.pypy.org Fri Mar 14 22:39:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Mar 2014 22:39:37 +0100 (CET) Subject: [pypy-commit] stmgc default: Add ((always_inline)) on the three major places that *must* be inlined Message-ID: <20140314213937.156311C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1015:240faf17faf5 Date: 2014-03-14 22:39 +0100 http://bitbucket.org/pypy/stmgc/changeset/240faf17faf5/ Log: Add ((always_inline)) on the three major places that *must* be inlined diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -147,6 +147,7 @@ stm_write() is called, or immediately after getting the object from stm_allocate(), as long as the rules above are respected. */ +__attribute__((always_inline)) static inline void stm_read(object_t *obj) { ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = @@ -158,6 +159,7 @@ the next one, then stm_write() needs to be called again. It is not necessary to call it immediately after stm_allocate(). */ +__attribute__((always_inline)) static inline void stm_write(object_t *obj) { if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) @@ -177,6 +179,7 @@ of 8 and at least 16. In the fast-path, this is inlined to just a few assembler instructions. 
*/ +__attribute__((always_inline)) static inline object_t *stm_allocate(ssize_t size_rounded_up) { OPT_ASSERT(size_rounded_up >= 16); diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -52,6 +52,7 @@ object_t *_stm_allocate_slowpath(ssize_t); object_t *stm_allocate_weakref(ssize_t size_rounded_up); +__attribute__((always_inline)) inline static object_t *stm_allocate(ssize_t size_rounded_up) { OPT_ASSERT(size_rounded_up >= 16); OPT_ASSERT((size_rounded_up & 7) == 0); @@ -101,6 +102,7 @@ void _stm_write_slowpath(object_t *); +__attribute__((always_inline)) inline static void stm_write(object_t *ob) { if (UNLIKELY(ob->gil_flags & _STM_GCFLAG_WRITE_BARRIER)) _stm_write_slowpath(ob); From noreply at buildbot.pypy.org Sat Mar 15 01:48:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 15 Mar 2014 01:48:16 +0100 (CET) Subject: [pypy-commit] pypy stdlib-3.2.5: merge vendor/stdlib Message-ID: <20140315004816.EA9A61C3058@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r69965:cbb08b3b1f21 Date: 2014-03-14 17:40 -0700 http://bitbucket.org/pypy/pypy/changeset/cbb08b3b1f21/ Log: merge vendor/stdlib diff too long, truncating to 2000 out of 27211 lines diff --git a/lib-python/3/__future__.py b/lib-python/3/__future__.py --- a/lib-python/3/__future__.py +++ b/lib-python/3/__future__.py @@ -114,7 +114,7 @@ CO_FUTURE_DIVISION) absolute_import = _Feature((2, 5, 0, "alpha", 1), - (2, 7, 0, "alpha", 0), + (3, 0, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT) with_statement = _Feature((2, 5, 0, "alpha", 1), diff --git a/lib-python/3/_abcoll.py b/lib-python/3/_abcoll.py --- a/lib-python/3/_abcoll.py +++ b/lib-python/3/_abcoll.py @@ -184,12 +184,12 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return other.__lt__(self) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + return other.__le__(self) def __eq__(self, other): if not isinstance(other, Set): diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py new file mode 100644 --- /dev/null +++ b/lib-python/3/_osx_support.py @@ -0,0 +1,488 @@ +"""Shared OS X support functions.""" + +import os +import re +import sys + +__all__ = [ + 'compiler_fixup', + 'customize_config_vars', + 'customize_compiler', + 'get_platform_osx', +] + +# configuration variables that may contain universal build flags, +# like "-arch" or "-isdkroot", that may need customization for +# the user environment +_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', + 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', + 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', + 'PY_CORE_CFLAGS') + +# configuration variables that may contain compiler calls +_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX') + +# prefix added to original configuration variable names +_INITPRE = '_OSX_SUPPORT_INITIAL_' + + +def _find_executable(executable, path=None): + """Tries to find 'executable' in the directories listed in 'path'. + + A string listing directories separated by 'os.pathsep'; defaults to + os.environ['PATH']. Returns the complete filename or None if not found. 
+ """ + if path is None: + path = os.environ['PATH'] + + paths = path.split(os.pathsep) + base, ext = os.path.splitext(executable) + + if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): + executable = executable + '.exe' + + if not os.path.isfile(executable): + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + # the file exists, we have a shot at spawn working + return f + return None + else: + return executable + + +def _read_output(commandstring): + """Output from succesful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. + # tempfile is also not available then. + import contextlib + try: + import tempfile + fp = tempfile.NamedTemporaryFile() + except ImportError: + fp = open("/tmp/_osx_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read().decode('utf-8').strip() if not os.system(cmd) else None + + +def _find_build_tool(toolname): + """Find a build tool on current path or using xcrun""" + return (_find_executable(toolname) + or _read_output("/usr/bin/xcrun -find %s" % (toolname,)) + or '' + ) + +_SYSTEM_VERSION = None + +def _get_system_version(): + """Return the OS X system version as a string""" + # Reading this plist is a documented way to get the system + # version (see the documentation for the Gestalt Manager) + # We avoid using platform.mac_ver to avoid possible bootstrap issues during + # the build of Python itself (distutils is used to build standard library + # extensions). + + global _SYSTEM_VERSION + + if _SYSTEM_VERSION is None: + _SYSTEM_VERSION = '' + try: + f = open('/System/Library/CoreServices/SystemVersion.plist') + except IOError: + # We're on a plain darwin box, fall back to the default + # behaviour. + pass + else: + try: + m = re.search(r'ProductUserVisibleVersion\s*' + r'(.*?)', f.read()) + finally: + f.close() + if m is not None: + _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2]) + # else: fall back to the default behaviour + + return _SYSTEM_VERSION + +def _remove_original_values(_config_vars): + """Remove original unmodified values for testing""" + # This is needed for higher-level cross-platform tests of get_platform. + for k in list(_config_vars): + if k.startswith(_INITPRE): + del _config_vars[k] + +def _save_modified_value(_config_vars, cv, newvalue): + """Save modified and original unmodified value of configuration var""" + + oldvalue = _config_vars.get(cv, '') + if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars): + _config_vars[_INITPRE + cv] = oldvalue + _config_vars[cv] = newvalue + +def _supports_universal_builds(): + """Returns True if universal builds are supported on this system""" + # As an approximation, we assume that if we are running on 10.4 or above, + # then we are running with an Xcode environment that supports universal + # builds, in particular -isysroot and -arch arguments to the compiler. This + # is in support of allowing 10.4 universal builds to run on 10.3.x systems. 
+ + osx_version = _get_system_version() + if osx_version: + try: + osx_version = tuple(int(i) for i in osx_version.split('.')) + except ValueError: + osx_version = '' + return bool(osx_version >= (10, 4)) if osx_version else False + + +def _find_appropriate_compiler(_config_vars): + """Find appropriate C compiler for extension module builds""" + + # Issue #13590: + # The OSX location for the compiler varies between OSX + # (or rather Xcode) releases. With older releases (up-to 10.5) + # the compiler is in /usr/bin, with newer releases the compiler + # can only be found inside Xcode.app if the "Command Line Tools" + # are not installed. + # + # Futhermore, the compiler that can be used varies between + # Xcode releases. Upto Xcode 4 it was possible to use 'gcc-4.2' + # as the compiler, after that 'clang' should be used because + # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that + # miscompiles Python. + + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + # The CC config var might contain additional arguments. + # Ignore them while searching. + cc = oldcc = _config_vars['CC'].split()[0] + if not _find_executable(cc): + # Compiler is not found on the shell search PATH. + # Now search for clang, first on PATH (if the Command LIne + # Tools have been installed in / or if the user has provided + # another location via CC). If not found, try using xcrun + # to find an uninstalled clang (within a selected Xcode). + + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself (and os.popen is + # implemented on top of subprocess and is therefore not + # usable as well) + + cc = _find_build_tool('clang') + + elif os.path.basename(cc).startswith('gcc'): + # Compiler is GCC, check if it is LLVM-GCC + data = _read_output("'%s' --version" + % (cc.replace("'", "'\"'\"'"),)) + if 'llvm-gcc' in data: + # Found LLVM-GCC, fall back to clang + cc = _find_build_tool('clang') + + if not cc: + raise SystemError( + "Cannot locate working compiler") + + if cc != oldcc: + # Found a replacement compiler. + # Modify config vars using new compiler, if not already explictly + # overriden by an env variable, preserving additional arguments. + for cv in _COMPILER_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + cv_split = _config_vars[cv].split() + cv_split[0] = cc if cv != 'CXX' else cc + '++' + _save_modified_value(_config_vars, cv, ' '.join(cv_split)) + + return _config_vars + + +def _remove_universal_flags(_config_vars): + """Remove all universal build arguments from config vars""" + + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _remove_unsupported_archs(_config_vars): + """Remove any unsupported archs from config vars""" + # Different Xcode releases support different sets for '-arch' + # flags. In particular, Xcode 4.x no longer supports the + # PPC architectures. + # + # This code automatically removes '-arch ppc' and '-arch ppc64' + # when these are not supported. That makes it possible to + # build extensions on OSX 10.7 and later with the prebuilt + # 32-bit installer on the python.org website. 
+ + # skip checks if the compiler was overriden with a CC env variable + if 'CC' in os.environ: + return _config_vars + + if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + # NOTE: Cannot use subprocess here because of bootstrap + # issues when building Python itself + status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%( + _config_vars['CC'].replace("'", "'\"'\"'"),)) + # The Apple compiler drivers return status 255 if no PPC + if (status >> 8) == 255: + # Compiler doesn't support PPC, remove the related + # '-arch' flags if not explicitly overridden by an + # environment variable + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _override_all_archs(_config_vars): + """Allow override of all archs with ARCHFLAGS env var""" + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for cv in _UNIVERSAL_CONFIG_VARS: + if cv in _config_vars and '-arch' in _config_vars[cv]: + flags = _config_vars[cv] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def _check_for_unavailable_sdk(_config_vars): + """Remove references to any SDKs not available""" + # If we're on OSX 10.5 or later and the user tries to + # compile an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. This is particularly important with + # the standalong Command Line Tools alternative to a + # full-blown Xcode install since the CLT packages do not + # provide SDKs. If the SDK is not present, it is assumed + # that the header files and dev libs have been installed + # to /usr and /System/Library by either a standalone CLT + # package or the CLT component within Xcode. + cflags = _config_vars.get('CFLAGS', '') + m = re.search(r'-isysroot\s+(\S+)', cflags) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for cv in _UNIVERSAL_CONFIG_VARS: + # Do not alter a config var explicitly overriden by env var + if cv in _config_vars and cv not in os.environ: + flags = _config_vars[cv] + flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) + _save_modified_value(_config_vars, cv, flags) + + return _config_vars + + +def compiler_fixup(compiler_so, cc_args): + """ + This function will strip '-isysroot PATH' and '-arch ARCH' from the + compile flags if the user has specified one them in extra_compile_flags. + + This is needed because '-arch ARCH' adds another architecture to the + build, without a way to remove an architecture. Furthermore GCC will + barf if multiple '-isysroot' arguments are present. + """ + stripArch = stripSysroot = False + + compiler_so = list(compiler_so) + + if not _supports_universal_builds(): + # OSX before 10.4.0, these don't support -arch and -isysroot at + # all. 
+ stripArch = stripSysroot = True + else: + stripArch = '-arch' in cc_args + stripSysroot = '-isysroot' in cc_args + + if stripArch or 'ARCHFLAGS' in os.environ: + while True: + try: + index = compiler_so.index('-arch') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + if 'ARCHFLAGS' in os.environ and not stripArch: + # User specified different -arch flags in the environ, + # see also distutils.sysconfig + compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() + + if stripSysroot: + while True: + try: + index = compiler_so.index('-isysroot') + # Strip this argument and the next one: + del compiler_so[index:index+2] + except ValueError: + break + + # Check if the SDK that is used during compilation actually exists, + # the universal build requires the usage of a universal SDK and not all + # users have that installed by default. + sysroot = None + if '-isysroot' in cc_args: + idx = cc_args.index('-isysroot') + sysroot = cc_args[idx+1] + elif '-isysroot' in compiler_so: + idx = compiler_so.index('-isysroot') + sysroot = compiler_so[idx+1] + + if sysroot and not os.path.isdir(sysroot): + from distutils import log + log.warn("Compiling with an SDK that doesn't seem to exist: %s", + sysroot) + log.warn("Please check your Xcode installation") + + return compiler_so + + +def customize_config_vars(_config_vars): + """Customize Python build configuration variables. + + Called internally from sysconfig with a mutable mapping + containing name/value pairs parsed from the configured + makefile used to build this interpreter. Returns + the mapping updated as needed to reflect the environment + in which the interpreter is running; in the case of + a Python from a binary installer, the installed + environment may be very different from the build + environment, i.e. different OS levels, different + built tools, different available CPU architectures. + + This customization is performed whenever + distutils.sysconfig.get_config_vars() is first + called. It may be used in environments where no + compilers are present, i.e. when installing pure + Python dists. Customization of compiler paths + and detection of unavailable archs is deferred + until the first extention module build is + requested (in distutils.sysconfig.customize_compiler). + + Currently called from distutils.sysconfig + """ + + if not _supports_universal_builds(): + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + _remove_universal_flags(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + # Remove references to sdks that are not found + _check_for_unavailable_sdk(_config_vars) + + return _config_vars + + +def customize_compiler(_config_vars): + """Customize compiler path and configuration variables. + + This customization is performed when the first + extension module build is requested + in distutils.sysconfig.customize_compiler). 
+ """ + + # Find a compiler to use for extension module builds + _find_appropriate_compiler(_config_vars) + + # Remove ppc arch flags if not supported here + _remove_unsupported_archs(_config_vars) + + # Allow user to override all archs with ARCHFLAGS env var + _override_all_archs(_config_vars) + + return _config_vars + + +def get_platform_osx(_config_vars, osname, release, machine): + """Filter values for get_platform()""" + # called from get_platform() in sysconfig and distutils.util + # + # For our purposes, we'll assume that the system version from + # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set + # to. This makes the compatibility story a bit more sane because the + # machine is going to compile and link as if it were + # MACOSX_DEPLOYMENT_TARGET. + + macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '') + macrelease = _get_system_version() or macver + macver = macver or macrelease + + if macver: + release = macver + osname = "macosx" + + # Use the original CFLAGS value, if available, so that we + # return the same machine type for the platform string. + # Otherwise, distutils may consider this a cross-compiling + # case and disallow installs. + cflags = _config_vars.get(_INITPRE+'CFLAGS', + _config_vars.get('CFLAGS', '')) + if ((macrelease + '.') >= '10.4.' and + '-arch' in cflags.strip()): + # The universal build will build fat binaries, but not on + # systems before 10.4 + + machine = 'fat' + + archs = re.findall('-arch\s+(\S+)', cflags) + archs = tuple(sorted(set(archs))) + + if len(archs) == 1: + machine = archs[0] + elif archs == ('i386', 'ppc'): + machine = 'fat' + elif archs == ('i386', 'x86_64'): + machine = 'intel' + elif archs == ('i386', 'ppc', 'x86_64'): + machine = 'fat3' + elif archs == ('ppc64', 'x86_64'): + machine = 'fat64' + elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): + machine = 'universal' + else: + raise ValueError( + "Don't know machine value for archs=%r" % (archs,)) + + elif machine == 'i386': + # On OSX the machine type returned by uname is always the + # 32-bit variant, even if the executable architecture is + # the 64-bit variant + if sys.maxsize >= 2**32: + machine = 'x86_64' + + elif machine in ('PowerPC', 'Power_Macintosh'): + # Pick a sane name for the PPC architecture. + # See 'i386' case + if sys.maxsize >= 2**32: + machine = 'ppc64' + else: + machine = 'ppc' + + return (osname, release, machine) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -298,7 +298,7 @@ def seek(self, pos, whence=0): """Change stream position. - Change the stream position to byte offset offset. offset is + Change the stream position to byte offset pos. Argument pos is interpreted relative to the position indicated by whence. 
Values for whence are ints: @@ -889,12 +889,18 @@ return pos def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def writable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return True @@ -1567,6 +1573,8 @@ return self._buffer def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") return self._seekable def readable(self): diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -339,7 +339,7 @@ raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = 1900 + year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 @@ -444,6 +444,12 @@ else: tz = value break + leap_year_fix = False + if year is None and month == 2 and day == 29: + year = 1904 # 1904 is first leap year of 20th century + leap_year_fix = True + elif year is None: + year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian == -1 and week_of_year != -1 and weekday != -1: @@ -472,6 +478,12 @@ else: gmtoff = None + if leap_year_fix: + # the caller didn't supply a year but asked for Feb 29th. We couldn't + # use the default of 1900 for computations. We set it back to ensure + # that February 29th is smaller than March 1st. + year = 1900 + return (year, month, day, hour, minute, second, weekday, julian, tz, gmtoff, tzname), fraction diff --git a/lib-python/3/_weakrefset.py b/lib-python/3/_weakrefset.py --- a/lib-python/3/_weakrefset.py +++ b/lib-python/3/_weakrefset.py @@ -63,7 +63,7 @@ yield item def __len__(self): - return sum(x() is not None for x in self.data) + return len(self.data) - len(self._pending_removals) def __contains__(self, item): try: @@ -114,36 +114,21 @@ def update(self, other): if self._pending_removals: self._commit_removals() - if isinstance(other, self.__class__): - self.data.update(other.data) - else: - for element in other: - self.add(element) + for element in other: + self.add(element) def __ior__(self, other): self.update(other) return self - # Helper functions for simple delegating methods. 
- def _apply(self, other, method): - if not isinstance(other, self.__class__): - other = self.__class__(other) - newdata = method(other.data) - newset = self.__class__() - newset.data = newdata + def difference(self, other): + newset = self.copy() + newset.difference_update(other) return newset - - def difference(self, other): - return self._apply(other, self.data.difference) __sub__ = difference def difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.difference_update(ref(item) for item in other) + self.__isub__(other) def __isub__(self, other): if self._pending_removals: self._commit_removals() @@ -154,13 +139,11 @@ return self def intersection(self, other): - return self._apply(other, self.data.intersection) + return self.__class__(item for item in other if item in self) __and__ = intersection def intersection_update(self, other): - if self._pending_removals: - self._commit_removals() - self.data.intersection_update(ref(item) for item in other) + self.__iand__(other) def __iand__(self, other): if self._pending_removals: self._commit_removals() @@ -169,17 +152,17 @@ def issubset(self, other): return self.data.issubset(ref(item) for item in other) - __lt__ = issubset + __le__ = issubset - def __le__(self, other): - return self.data <= set(ref(item) for item in other) + def __lt__(self, other): + return self.data < set(ref(item) for item in other) def issuperset(self, other): return self.data.issuperset(ref(item) for item in other) - __gt__ = issuperset + __ge__ = issuperset - def __ge__(self, other): - return self.data >= set(ref(item) for item in other) + def __gt__(self, other): + return self.data > set(ref(item) for item in other) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -187,27 +170,24 @@ return self.data == set(ref(item) for item in other) def symmetric_difference(self, other): - return self._apply(other, self.data.symmetric_difference) + newset = self.copy() + newset.symmetric_difference_update(other) + return newset __xor__ = symmetric_difference def symmetric_difference_update(self, other): - if self._pending_removals: - self._commit_removals() - if self is other: - self.data.clear() - else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.__ixor__(other) def __ixor__(self, other): if self._pending_removals: self._commit_removals() if self is other: self.data.clear() else: - self.data.symmetric_difference_update(ref(item) for item in other) + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) return self def union(self, other): - return self._apply(other, self.data.union) + return self.__class__(e for s in (self, other) for e in s) __or__ = union def isdisjoint(self, other): diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -692,7 +692,9 @@ self._patchheader() def close(self): - if self._file: + if self._file is None: + return + try: self._ensure_header_written(0) if self._datawritten & 1: # quick pad to even size @@ -703,10 +705,12 @@ self._datalength != self._datawritten or \ self._marklength: self._patchheader() + finally: # Prevent ref cycles self._convert = None - self._file.close() + f = self._file self._file = None + f.close() # # Internal methods. 
diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -736,10 +736,10 @@ - default -- The value to be produced if the option is not specified. - - type -- The type which the command-line arguments should be converted - to, should be one of 'string', 'int', 'float', 'complex' or a - callable object that accepts a single string argument. If None, - 'string' is assumed. + - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. - choices -- A container of values that should be allowed. If not None, after a command-line argument has been converted to the appropriate @@ -1701,9 +1701,12 @@ return args def parse_known_args(self, args=None, namespace=None): - # args default to the system args if args is None: + # args default to the system args args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) # default Namespace built from parser defaults if namespace is None: @@ -1714,10 +1717,7 @@ if action.dest is not SUPPRESS: if not hasattr(namespace, action.dest): if action.default is not SUPPRESS: - default = action.default - if isinstance(action.default, str): - default = self._get_value(action, default) - setattr(namespace, action.dest, default) + setattr(namespace, action.dest, action.default) # add any parser defaults that aren't present for dest in self._defaults: @@ -1957,12 +1957,23 @@ if positionals: self.error(_('too few arguments')) - # make sure all required actions were present + # make sure all required actions were present, and convert defaults. for action in self._actions: - if action.required: - if action not in seen_actions: + if action not in seen_actions: + if action.required: name = _get_action_name(action) self.error(_('argument %s is required') % name) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: @@ -1988,7 +1999,7 @@ for arg_string in arg_strings: # for regular arguments, just add them back into the list - if arg_string[0] not in self.fromfile_prefix_chars: + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) # replace arguments referencing files with the file content @@ -2198,9 +2209,12 @@ # Value conversion methods # ======================== def _get_values(self, action, arg_strings): - # for everything but PARSER args, strip out '--' + # for everything but PARSER, REMAINDER args, strip out first '--' if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] + try: + arg_strings.remove('--') + except ValueError: + pass # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: diff --git a/lib-python/3/asyncore.py b/lib-python/3/asyncore.py --- a/lib-python/3/asyncore.py +++ b/lib-python/3/asyncore.py @@ -225,6 +225,7 @@ debug = False connected = False 
accepting = False + connecting = False closing = False addr = None ignore_log_types = frozenset(['warning']) @@ -248,7 +249,7 @@ try: self.addr = sock.getpeername() except socket.error as err: - if err.args[0] == ENOTCONN: + if err.args[0] in (ENOTCONN, EINVAL): # To handle the case where we got an unconnected # socket. self.connected = False @@ -342,9 +343,11 @@ def connect(self, address): self.connected = False + self.connecting = True err = self.socket.connect_ex(address) if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ or err == EINVAL and os.name in ('nt', 'ce'): + self.addr = address return if err in (0, EISCONN): self.addr = address @@ -390,7 +393,7 @@ else: return data except socket.error as why: - # winsock sometimes throws ENOTCONN + # winsock sometimes raises ENOTCONN if why.args[0] in _DISCONNECTED: self.handle_close() return b'' @@ -400,6 +403,7 @@ def close(self): self.connected = False self.accepting = False + self.connecting = False self.del_channel() try: self.socket.close() @@ -438,7 +442,8 @@ # sockets that are connected self.handle_accept() elif not self.connected: - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_read() else: self.handle_read() @@ -449,6 +454,7 @@ raise socket.error(err, _strerror(err)) self.handle_connect() self.connected = True + self.connecting = False def handle_write_event(self): if self.accepting: @@ -457,12 +463,8 @@ return if not self.connected: - #check for errors - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - raise socket.error(err, _strerror(err)) - - self.handle_connect_event() + if self.connecting: + self.handle_connect_event() self.handle_write() def handle_expt_event(self): diff --git a/lib-python/3/bdb.py b/lib-python/3/bdb.py --- a/lib-python/3/bdb.py +++ b/lib-python/3/bdb.py @@ -22,6 +22,7 @@ self.skip = set(skip) if skip else None self.breaks = {} self.fncache = {} + self.frame_returning = None def canonic(self, filename): if filename == "<" + filename[1:-1] + ">": @@ -80,7 +81,11 @@ def dispatch_return(self, frame, arg): if self.stop_here(frame) or frame == self.returnframe: - self.user_return(frame, arg) + try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None if self.quitting: raise BdbQuit return self.trace_dispatch @@ -186,6 +191,14 @@ def set_step(self): """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. 
+ if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch self._set_stopinfo(None, None) def set_next(self, frame): diff --git a/lib-python/3/calendar.py b/lib-python/3/calendar.py --- a/lib-python/3/calendar.py +++ b/lib-python/3/calendar.py @@ -161,7 +161,11 @@ oneday = datetime.timedelta(days=1) while True: yield date - date += oneday + try: + date += oneday + except OverflowError: + # Adding one day could fail after datetime.MAXYEAR + break if date.month != month and date.weekday() == self.firstweekday: break diff --git a/lib-python/3/cgi.py b/lib-python/3/cgi.py --- a/lib-python/3/cgi.py +++ b/lib-python/3/cgi.py @@ -214,17 +214,17 @@ """ import http.client - boundary = "" + boundary = b"" if 'boundary' in pdict: boundary = pdict['boundary'] if not valid_boundary(boundary): raise ValueError('Invalid boundary in multipart form: %r' % (boundary,)) - nextpart = "--" + boundary - lastpart = "--" + boundary + "--" + nextpart = b"--" + boundary + lastpart = b"--" + boundary + b"--" partdict = {} - terminator = "" + terminator = b"" while terminator != lastpart: bytes = -1 @@ -243,7 +243,7 @@ raise ValueError('Maximum content length exceeded') data = fp.read(bytes) else: - data = "" + data = b"" # Read lines until end of part. lines = [] while 1: @@ -251,7 +251,7 @@ if not line: terminator = lastpart # End outer loop break - if line.startswith("--"): + if line.startswith(b"--"): terminator = line.rstrip() if terminator in (nextpart, lastpart): break @@ -263,12 +263,12 @@ if lines: # Strip final line terminator line = lines[-1] - if line[-2:] == "\r\n": + if line[-2:] == b"\r\n": line = line[:-2] - elif line[-1:] == "\n": + elif line[-1:] == b"\n": line = line[:-1] lines[-1] = line - data = "".join(lines) + data = b"".join(lines) line = headers['content-disposition'] if not line: continue diff --git a/lib-python/3/cgitb.py b/lib-python/3/cgitb.py --- a/lib-python/3/cgitb.py +++ b/lib-python/3/cgitb.py @@ -293,14 +293,19 @@ if self.logdir is not None: suffix = ['.txt', '.html'][self.format=="html"] (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) + try: file = os.fdopen(fd, 'w') file.write(doc) file.close() - msg = '

<p> %s contains the description of this error.' % path + msg = '%s contains the description of this error.' % path except: - msg = '<p> Tried to save traceback to %s, but failed.' % path - self.file.write(msg + '\n') + msg = 'Tried to save traceback to %s, but failed.' % path + + if self.format == 'html': + self.file.write('<p>%s</p>
      \n' % msg) + else: + self.file.write(msg + '\n') try: self.file.flush() except: pass diff --git a/lib-python/3/collections.py b/lib-python/3/collections.py --- a/lib-python/3/collections.py +++ b/lib-python/3/collections.py @@ -281,6 +281,10 @@ 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + return None + {field_defs} ''' diff --git a/lib-python/3/concurrent/futures/_base.py b/lib-python/3/concurrent/futures/_base.py --- a/lib-python/3/concurrent/futures/_base.py +++ b/lib-python/3/concurrent/futures/_base.py @@ -112,12 +112,14 @@ def __init__(self, num_pending_calls, stop_on_exception): self.num_pending_calls = num_pending_calls self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() super().__init__() def _decrement_pending_calls(self): - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() def add_result(self, future): super().add_result(future) @@ -517,7 +519,7 @@ """Returns a iterator equivalent to map(fn, iter). Args: - fn: A callable that will take take as many arguments as there are + fn: A callable that will take as many arguments as there are passed iterables. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py --- a/lib-python/3/configparser.py +++ b/lib-python/3/configparser.py @@ -99,10 +99,9 @@ yes, on for True). Returns False or True. items(section=_UNSET, raw=False, vars=None) - If section is given, return a list of tuples with (section_name, - section_proxy) for each section, including DEFAULTSECT. Otherwise, - return a list of tuples with (name, value) for each option - in the section. + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. remove_section(section) Remove the given file section and all its options. @@ -852,6 +851,19 @@ value_getter = lambda option: d[option] return [(option, value_getter(option)) for option in d.keys()] + def popitem(self): + """Remove a section from the parser and return it as + a (section_name, section_proxy) tuple. If no section is present, raise + KeyError. + + The section DEFAULT is never returned because it cannot be removed. + """ + for key in self.sections(): + value = self[key] + del self[key] + return key, value + raise KeyError + def optionxform(self, optionstr): return optionstr.lower() @@ -947,7 +959,8 @@ # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. 
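(A hedged illustration of the mapping-protocol behaviour the configparser hunks around this point describe -- not part of the archived patch; section/option names and the outputs in the comments are indicative only, and popitem() is the method added in the hunk above:)

    import configparser

    cp = configparser.ConfigParser()
    cp['server'] = {'host': 'localhost', 'port': '8080'}  # create a section via __setitem__
    cp['server'] = {'host': 'example.org'}                # re-assign: the old options are dropped
    print(dict(cp['server']))   # {'host': 'example.org'}
    print(cp.popitem())         # ('server', <Section: server>); the DEFAULT section is never popped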
- self.remove_section(key) + if key in self._sections: + self._sections[key].clear() self.read_dict({key: value}) def __delitem__(self, key): diff --git a/lib-python/3/ctypes/test/test_bitfields.py b/lib-python/3/ctypes/test/test_bitfields.py --- a/lib-python/3/ctypes/test/test_bitfields.py +++ b/lib-python/3/ctypes/test/test_bitfields.py @@ -246,5 +246,25 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] + @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + def test_uint32(self): + class X(Structure): + _fields_ = [("a", c_uint32, 32)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFDCBA987 + self.assertEqual(x.a, 0xFDCBA987) + + @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + def test_uint64(self): + class X(Structure): + _fields_ = [("a", c_uint64, 64)] + x = X() + x.a = 10 + self.assertEqual(x.a, 10) + x.a = 0xFEDCBA9876543211 + self.assertEqual(x.a, 0xFEDCBA9876543211) + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_numbers.py b/lib-python/3/ctypes/test/test_numbers.py --- a/lib-python/3/ctypes/test/test_numbers.py +++ b/lib-python/3/ctypes/test/test_numbers.py @@ -220,6 +220,16 @@ # probably be changed: self.assertRaises(TypeError, c_int, c_long(42)) + def test_float_overflow(self): + import sys + big_int = int(sys.float_info.max) * 2 + for t in float_types + [c_longdouble]: + self.assertRaises(OverflowError, t, big_int) + if (hasattr(t, "__ctype_be__")): + self.assertRaises(OverflowError, t.__ctype_be__, big_int) + if (hasattr(t, "__ctype_le__")): + self.assertRaises(OverflowError, t.__ctype_le__, big_int) + ## def test_perf(self): ## check_perf() diff --git a/lib-python/3/ctypes/test/test_returnfuncptrs.py b/lib-python/3/ctypes/test/test_returnfuncptrs.py --- a/lib-python/3/ctypes/test/test_returnfuncptrs.py +++ b/lib-python/3/ctypes/test/test_returnfuncptrs.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +import os import _ctypes_test @@ -33,5 +34,34 @@ self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) self.assertRaises(TypeError, strchr, b"abcdef") + def test_from_dll(self): + dll = CDLL(_ctypes_test.__file__) + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(("my_strchr", dll)) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + + # Issue 6083: Reference counting bug + def test_from_dll_refcount(self): + class BadSequence(tuple): + def __getitem__(self, key): + if key == 0: + return "my_strchr" + if key == 1: + return CDLL(_ctypes_test.__file__) + raise IndexError + + # _CFuncPtr instances are now callable with a tuple argument + # which denotes a function name and a dll: + strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)( + BadSequence(("my_strchr", CDLL(_ctypes_test.__file__)))) + self.assertTrue(strchr(b"abcdef", b"b"), "bcdef") + self.assertEqual(strchr(b"abcdef", b"x"), None) + self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0) + self.assertRaises(TypeError, strchr, b"abcdef") + if __name__ == "__main__": unittest.main() diff --git a/lib-python/3/ctypes/test/test_structures.py b/lib-python/3/ctypes/test/test_structures.py --- a/lib-python/3/ctypes/test/test_structures.py +++ b/lib-python/3/ctypes/test/test_structures.py @@ -1,6 +1,7 @@ import unittest from ctypes import * from struct import 
calcsize +import _testcapi class SubclassesTest(unittest.TestCase): def test_subclass(self): @@ -199,6 +200,14 @@ "_pack_": -1} self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + # Issue 15989 + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.INT_MAX + 1} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + d = {"_fields_": [("a", c_byte)], + "_pack_": _testcapi.UINT_MAX + 2} + self.assertRaises(ValueError, type(Structure), "X", (Structure,), d) + def test_initializers(self): class Person(Structure): _fields_ = [("name", c_char*6), diff --git a/lib-python/3/ctypes/test/test_win32.py b/lib-python/3/ctypes/test/test_win32.py --- a/lib-python/3/ctypes/test/test_win32.py +++ b/lib-python/3/ctypes/test/test_win32.py @@ -3,6 +3,7 @@ from ctypes import * from ctypes.test import is_resource_enabled import unittest, sys +from test import support import _ctypes_test @@ -60,7 +61,9 @@ def test_COMError(self): from _ctypes import COMError - self.assertEqual(COMError.__doc__, "Raised when a COM method call failed.") + if support.HAVE_DOCSTRINGS: + self.assertEqual(COMError.__doc__, + "Raised when a COM method call failed.") ex = COMError(-1, "text", ("details",)) self.assertEqual(ex.hresult, -1) diff --git a/lib-python/3/curses/__init__.py b/lib-python/3/curses/__init__.py --- a/lib-python/3/curses/__init__.py +++ b/lib-python/3/curses/__init__.py @@ -5,7 +5,7 @@ import curses from curses import textpad - curses.initwin() + curses.initscr() ... """ diff --git a/lib-python/3/decimal.py b/lib-python/3/decimal.py --- a/lib-python/3/decimal.py +++ b/lib-python/3/decimal.py @@ -1555,7 +1555,13 @@ def __float__(self): """Float representation.""" - return float(str(self)) + if self._isnan(): + if self.is_snan(): + raise ValueError("Cannot convert signaling NaN to float") + s = "-nan" if self._sign else "nan" + else: + s = str(self) + return float(s) def __int__(self): """Converts self to an int, truncating if necessary.""" diff --git a/lib-python/3/distutils/__init__.py b/lib-python/3/distutils/__init__.py --- a/lib-python/3/distutils/__init__.py +++ b/lib-python/3/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. 
# #--start constants-- -__version__ = "3.2.3" +__version__ = "3.2.5" #--end constants-- diff --git a/lib-python/3/distutils/command/bdist_rpm.py b/lib-python/3/distutils/command/bdist_rpm.py --- a/lib-python/3/distutils/command/bdist_rpm.py +++ b/lib-python/3/distutils/command/bdist_rpm.py @@ -3,7 +3,7 @@ Implements the Distutils 'bdist_rpm' command (create RPM source and binary distributions).""" -import sys, os +import subprocess, sys, os from distutils.core import Command from distutils.debug import DEBUG from distutils.util import get_platform @@ -190,7 +190,7 @@ if self.fix_python: self.python = sys.executable else: - self.python = "python" + self.python = "python3" elif self.fix_python: raise DistutilsOptionError( "--python and --fix-python are mutually exclusive options") @@ -320,6 +320,7 @@ rpm_cmd.append('-bb') else: rpm_cmd.append('-ba') + rpm_cmd.extend(['--define', '__python %s' % self.python]) if self.rpm3_mode: rpm_cmd.extend(['--define', '_topdir %s' % os.path.abspath(self.rpm_base)]) @@ -405,6 +406,21 @@ 'Summary: ' + self.distribution.get_description(), ] + # Workaround for #14443 which affects some RPM based systems such as + # RHEL6 (and probably derivatives) + vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}') + # Generate a potential replacement value for __os_install_post (whilst + # normalizing the whitespace to simplify the test for whether the + # invocation of brp-python-bytecompile passes in __python): + vendor_hook = '\n'.join([' %s \\' % line.strip() + for line in vendor_hook.splitlines()]) + problem = "brp-python-bytecompile \\\n" + fixed = "brp-python-bytecompile %{__python} \\\n" + fixed_hook = vendor_hook.replace(problem, fixed) + if fixed_hook != vendor_hook: + spec_file.append('# Workaround for http://bugs.python.org/issue14443') + spec_file.append('%define __os_install_post ' + fixed_hook + '\n') + # put locale summaries into spec file # XXX not supported for now (hard to put a dictionary # in a config file -- arg!) diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -682,13 +682,13 @@ # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return # ".pypy-VERSION.so" instead. 
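(A hedged sketch of why the suffix lookup below matters -- not part of the patch; the values in the comments are examples only and depend on interpreter and platform:)

    import sysconfig

    suffix = sysconfig.get_config_var('EXT_SUFFIX')   # e.g. '.pypy-22.so' on PyPy with cpyext
    if suffix is None:                                # older configurations may only define 'SO'
        suffix = sysconfig.get_config_var('SO')       # e.g. '.so'
    print('extension module "foo.bar" is built as', 'foo/bar' + suffix)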
- so_ext = _get_c_extension_suffix() - if so_ext is None: - so_ext = get_config_var('SO') # fall-back + ext_suffix = _get_c_extension_suffix() + if ext_suffix is None: + ext_suffix = get_config_var('EXT_SUFFIX') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' - return os.path.join(*ext_path) + so_ext + ext_suffix = '_d.pyd' + return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): """Return the list of symbols that a shared extension has to diff --git a/lib-python/3/distutils/command/check.py b/lib-python/3/distutils/command/check.py --- a/lib-python/3/distutils/command/check.py +++ b/lib-python/3/distutils/command/check.py @@ -23,6 +23,9 @@ def system_message(self, level, message, *children, **kwargs): self.messages.append((level, message, children, kwargs)) + return nodes.system_message(message, level=level, + type=self.levels[level], + *children, **kwargs) HAS_DOCUTILS = True except Exception: diff --git a/lib-python/3/distutils/command/install.py b/lib-python/3/distutils/command/install.py --- a/lib-python/3/distutils/command/install.py +++ b/lib-python/3/distutils/command/install.py @@ -285,8 +285,8 @@ if self.user and (self.prefix or self.exec_prefix or self.home or self.install_base or self.install_platbase): - raise DistutilsOptionError("can't combine user with with prefix/" - "exec_prefix/home or install_(plat)base") + raise DistutilsOptionError("can't combine user with prefix, " + "exec_prefix/home, or install_(plat)base") # Next, stuff that's wrong (or dubious) only on certain platforms. if os.name != "posix": diff --git a/lib-python/3/distutils/command/upload.py b/lib-python/3/distutils/command/upload.py --- a/lib-python/3/distutils/command/upload.py +++ b/lib-python/3/distutils/command/upload.py @@ -125,7 +125,7 @@ if self.sign: data['gpg_signature'] = (os.path.basename(filename) + ".asc", - open(filename+".asc").read()) + open(filename+".asc", "rb").read()) # set up the authentication user_pass = (self.username + ":" + self.password).encode('ascii') diff --git a/lib-python/3/distutils/config.py b/lib-python/3/distutils/config.py --- a/lib-python/3/distutils/config.py +++ b/lib-python/3/distutils/config.py @@ -4,7 +4,6 @@ that uses .pypirc in the distutils.command package. 
""" import os -import sys from configparser import ConfigParser from distutils.cmd import Command @@ -43,16 +42,8 @@ def _store_pypirc(self, username, password): """Creates a default .pypirc file.""" rc = self._get_rc_file() - f = open(rc, 'w') - try: + with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: f.write(DEFAULT_PYPIRC % (username, password)) - finally: - f.close() - try: - os.chmod(rc, 0o600) - except OSError: - # should do something better here - pass def _read_pypirc(self): """Reads the .pypirc file.""" diff --git a/lib-python/3/distutils/dir_util.py b/lib-python/3/distutils/dir_util.py --- a/lib-python/3/distutils/dir_util.py +++ b/lib-python/3/distutils/dir_util.py @@ -141,6 +141,10 @@ src_name = os.path.join(src, n) dst_name = os.path.join(dst, n) + if n.startswith('.nfs'): + # skip NFS rename files + continue + if preserve_symlinks and os.path.islink(src_name): link_dest = os.readlink(src_name) if verbose >= 1: diff --git a/lib-python/3/distutils/sysconfig.py b/lib-python/3/distutils/sysconfig.py --- a/lib-python/3/distutils/sysconfig.py +++ b/lib-python/3/distutils/sysconfig.py @@ -23,6 +23,3 @@ from distutils.sysconfig_cpython import * from distutils.sysconfig_cpython import _config_vars # needed by setuptools from distutils.sysconfig_cpython import _variable_rx # read_setup_file() - -_USE_CLANG = None - diff --git a/lib-python/3/distutils/sysconfig_cpython.py b/lib-python/3/distutils/sysconfig_cpython.py --- a/lib-python/3/distutils/sysconfig_cpython.py +++ b/lib-python/3/distutils/sysconfig_cpython.py @@ -146,7 +146,7 @@ "I don't know where Python installs its library " "on platform '%s'" % os.name) -_USE_CLANG = None + def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. @@ -155,42 +155,28 @@ varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": - (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. + global _config_vars + if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' + + (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') newcc = None if 'CC' in os.environ: - newcc = os.environ['CC'] - elif sys.platform == 'darwin' and cc == 'gcc-4.2': - # Issue #13590: - # Since Apple removed gcc-4.2 in Xcode 4.2, we can no - # longer assume it is available for extension module builds. - # If Python was built with gcc-4.2, check first to see if - # it is available on this system; if not, try to use clang - # instead unless the caller explicitly set CC. - global _USE_CLANG - if _USE_CLANG is None: - from distutils import log - from subprocess import Popen, PIPE - p = Popen("! 
type gcc-4.2 && type clang && exit 2", - shell=True, stdout=PIPE, stderr=PIPE) - p.wait() - if p.returncode == 2: - _USE_CLANG = True - log.warn("gcc-4.2 not found, using clang instead") - else: - _USE_CLANG = False - if _USE_CLANG: - newcc = 'clang' - if newcc: - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - ldshared = newcc + ldshared[len(cc):] - cc = newcc + cc = os.environ['CC'] if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: @@ -225,7 +211,7 @@ linker_exe=cc, archiver=archiver) - compiler.shared_lib_extension = so_ext + compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): @@ -480,6 +466,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) @@ -499,6 +486,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" global _config_vars @@ -543,43 +531,11 @@ srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _config_vars[key] = flags - - else: - - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. 
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _config_vars[key] = flags + import _osx_support + _osx_support.customize_config_vars(_config_vars) if args: vals = [] diff --git a/lib-python/3/distutils/tests/test_bdist_dumb.py b/lib-python/3/distutils/tests/test_bdist_dumb.py --- a/lib-python/3/distutils/tests/test_bdist_dumb.py +++ b/lib-python/3/distutils/tests/test_bdist_dumb.py @@ -88,9 +88,9 @@ fp.close() contents = sorted(os.path.basename(fn) for fn in contents) - wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], - 'foo.%s.pyc' % imp.get_tag(), - 'foo.py'] + wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py'] + if not sys.dont_write_bytecode: + wanted.append('foo.%s.pyc' % imp.get_tag()) self.assertEqual(contents, sorted(wanted)) def test_suite(): diff --git a/lib-python/3/distutils/tests/test_bdist_msi.py b/lib-python/3/distutils/tests/test_bdist_msi.py --- a/lib-python/3/distutils/tests/test_bdist_msi.py +++ b/lib-python/3/distutils/tests/test_bdist_msi.py @@ -1,12 +1,11 @@ """Tests for distutils.command.bdist_msi.""" +import sys import unittest -import sys - from test.support import run_unittest - from distutils.tests import support - at unittest.skipUnless(sys.platform=="win32", "These tests are only for win32") + + at unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows') class BDistMSITestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): @@ -14,10 +13,11 @@ def test_minimal(self): # minimal test XXX need more tests from distutils.command.bdist_msi import bdist_msi - pkg_pth, dist = self.create_dist() + project_dir, dist = self.create_dist() cmd = bdist_msi(dist) cmd.ensure_finalized() + def test_suite(): return unittest.makeSuite(BDistMSITestCase) diff --git a/lib-python/3/distutils/tests/test_build_ext.py b/lib-python/3/distutils/tests/test_build_ext.py --- a/lib-python/3/distutils/tests/test_build_ext.py +++ b/lib-python/3/distutils/tests/test_build_ext.py @@ -73,8 +73,9 @@ self.assertEqual(xx.foo(2, 5), 7) self.assertEqual(xx.foo(13,15), 28) self.assertEqual(xx.new().demo(), None) - doc = 'This is a template module just for instruction.' - self.assertEqual(xx.__doc__, doc) + if support.HAVE_DOCSTRINGS: + doc = 'This is a template module just for instruction.' 
+ self.assertEqual(xx.__doc__, doc) self.assertTrue(isinstance(xx.Null(), xx.Null)) self.assertTrue(isinstance(xx.Str(), xx.Str)) @@ -317,8 +318,8 @@ finally: os.chdir(old_wd) self.assertTrue(os.path.exists(so_file)) - so_ext = sysconfig.get_config_var('SO') - self.assertTrue(so_file.endswith(so_ext)) + ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, other_tmp_dir) @@ -327,7 +328,7 @@ cmd.run() so_file = cmd.get_outputs()[0] self.assertTrue(os.path.exists(so_file)) - self.assertTrue(so_file.endswith(so_ext)) + self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, cmd.build_lib) @@ -354,7 +355,7 @@ self.assertEqual(lastdir, 'bar') def test_ext_fullpath(self): - ext = sysconfig.get_config_vars()['SO'] + ext = sysconfig.get_config_var('EXT_SUFFIX') # building lxml.etree inplace #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c') #etree_ext = Extension('lxml.etree', [etree_c]) diff --git a/lib-python/3/distutils/tests/test_dir_util.py b/lib-python/3/distutils/tests/test_dir_util.py --- a/lib-python/3/distutils/tests/test_dir_util.py +++ b/lib-python/3/distutils/tests/test_dir_util.py @@ -76,7 +76,6 @@ remove_tree(self.root_target, verbose=0) - def test_copy_tree_verbosity(self): mkpath(self.target, verbose=0) @@ -88,11 +87,8 @@ mkpath(self.target, verbose=0) a_file = os.path.join(self.target, 'ok.txt') - f = open(a_file, 'w') - try: + with open(a_file, 'w') as f: f.write('some content') - finally: - f.close() wanted = ['copying %s -> %s' % (a_file, self.target2)] copy_tree(self.target, self.target2, verbose=1) @@ -101,6 +97,21 @@ remove_tree(self.root_target, verbose=0) remove_tree(self.target2, verbose=0) + def test_copy_tree_skips_nfs_temp_files(self): + mkpath(self.target, verbose=0) + + a_file = os.path.join(self.target, 'ok.txt') + nfs_file = os.path.join(self.target, '.nfs123abc') + for f in a_file, nfs_file: + with open(f, 'w') as fh: + fh.write('some content') + + copy_tree(self.target, self.target2) + self.assertEqual(os.listdir(self.target2), ['ok.txt']) + + remove_tree(self.root_target, verbose=0) + remove_tree(self.target2, verbose=0) + def test_ensure_relative(self): if os.sep == '/': self.assertEqual(ensure_relative('/home/foo'), 'home/foo') diff --git a/lib-python/3/distutils/tests/test_install.py b/lib-python/3/distutils/tests/test_install.py --- a/lib-python/3/distutils/tests/test_install.py +++ b/lib-python/3/distutils/tests/test_install.py @@ -23,7 +23,7 @@ def _make_ext_name(modname): if os.name == 'nt' and sys.executable.endswith('_d.exe'): modname += '_d' - return modname + sysconfig.get_config_var('SO') + return modname + sysconfig.get_config_var('EXT_SUFFIX') class InstallTestCase(support.TempdirManager, @@ -165,7 +165,7 @@ cmd.home = 'home' self.assertRaises(DistutilsOptionError, cmd.finalize_options) - # can't combine user with with prefix/exec_prefix/home or + # can't combine user with prefix/exec_prefix/home or # install_(plat)base cmd.prefix = None cmd.user = 'user' diff --git a/lib-python/3/distutils/tests/test_msvc9compiler.py b/lib-python/3/distutils/tests/test_msvc9compiler.py --- a/lib-python/3/distutils/tests/test_msvc9compiler.py +++ b/lib-python/3/distutils/tests/test_msvc9compiler.py @@ -104,7 +104,7 @@ unittest.TestCase): def test_no_compiler(self): - # makes sure query_vcvarsall throws + # makes sure query_vcvarsall raises # a DistutilsPlatformError if the compiler # is not found from 
distutils.msvc9compiler import query_vcvarsall diff --git a/lib-python/3/distutils/tests/test_register.py b/lib-python/3/distutils/tests/test_register.py --- a/lib-python/3/distutils/tests/test_register.py +++ b/lib-python/3/distutils/tests/test_register.py @@ -1,5 +1,4 @@ """Tests for distutils.command.register.""" -import sys import os import unittest import getpass @@ -10,11 +9,14 @@ from distutils.command import register as register_module from distutils.command.register import register -from distutils.core import Distribution from distutils.errors import DistutilsSetupError -from distutils.tests import support -from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase +from distutils.tests.test_config import PyPIRCCommandTestCase + +try: + import docutils +except ImportError: + docutils = None PYPIRC_NOPASSWORD = """\ [distutils] @@ -193,6 +195,7 @@ self.assertEqual(headers['Content-length'], '290') self.assertTrue((b'tarek') in req.data) + @unittest.skipUnless(docutils is not None, 'needs docutils') def test_strict(self): # testing the script option # when on, the register command stops if @@ -205,13 +208,6 @@ cmd.strict = 1 self.assertRaises(DistutilsSetupError, cmd.run) - # we don't test the reSt feature if docutils - # is not installed - try: - import docutils - except ImportError: - return - # metadata are OK but long_description is broken metadata = {'url': 'xxx', 'author': 'xxx', 'author_email': 'éxéxé', @@ -265,6 +261,22 @@ finally: del register_module.input + @unittest.skipUnless(docutils is not None, 'needs docutils') + def test_register_invalid_long_description(self): + description = ':funkie:`str`' # mimic Sphinx-specific markup + metadata = {'url': 'xxx', 'author': 'xxx', + 'author_email': 'xxx', + 'name': 'xxx', 'version': 'xxx', + 'long_description': description} + cmd = self._get_cmd(metadata) + cmd.ensure_finalized() + cmd.strict = True + inputs = Inputs('2', 'tarek', 'tarek at ziade.org') + register_module.input = inputs + self.addCleanup(delattr, register_module, 'input') + + self.assertRaises(DistutilsSetupError, cmd.run) + def test_check_metadata_deprecated(self): # makes sure make_metadata is deprecated cmd = self._get_cmd() diff --git a/lib-python/3/distutils/tests/test_sdist.py b/lib-python/3/distutils/tests/test_sdist.py --- a/lib-python/3/distutils/tests/test_sdist.py +++ b/lib-python/3/distutils/tests/test_sdist.py @@ -6,6 +6,7 @@ import zipfile from os.path import join from textwrap import dedent +from test.support import captured_stdout, check_warnings, run_unittest try: import zlib @@ -13,7 +14,6 @@ except ImportError: ZLIB_SUPPORT = False -from test.support import captured_stdout, check_warnings, run_unittest from distutils.command.sdist import sdist, show_formats from distutils.core import Distribution @@ -83,9 +83,8 @@ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run') def test_prune_file_list(self): - # this test creates a package with some vcs dirs in it - # and launch sdist to make sure they get pruned - # on all systems + # this test creates a project with some VCS dirs and an NFS rename + # file, then launches sdist to check they get pruned on all systems # creating VCS directories with some files in them os.mkdir(join(self.tmp_dir, 'somecode', '.svn')) @@ -99,6 +98,8 @@ self.write_file((self.tmp_dir, 'somecode', '.git', 'ok'), 'xxx') + self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx') + # now building a sdist dist, cmd = self.get_cmd() @@ -326,6 +327,7 @@ # filling data_files by pointing files in package_data 
dist.package_data = {'somecode': ['*.txt']} self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#') + cmd.formats = ['gztar'] cmd.ensure_finalized() cmd.run() diff --git a/lib-python/3/distutils/tests/test_sysconfig.py b/lib-python/3/distutils/tests/test_sysconfig.py --- a/lib-python/3/distutils/tests/test_sysconfig.py +++ b/lib-python/3/distutils/tests/test_sysconfig.py @@ -102,7 +102,27 @@ import sysconfig as global_sysconfig self.assertEqual(global_sysconfig.get_config_var('CFLAGS'), sysconfig.get_config_var('CFLAGS')) self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'), sysconfig.get_config_var('LDFLAGS')) - self.assertEqual(global_sysconfig.get_config_var('LDSHARED'),sysconfig.get_config_var('LDSHARED')) + + @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'),'compiler flags customized') + def test_sysconfig_compiler_vars(self): + # On OS X, binary installers support extension module building on + # various levels of the operating system with differing Xcode + # configurations. This requires customization of some of the + # compiler configuration directives to suit the environment on + # the installed machine. Some of these customizations may require + # running external programs and, so, are deferred until needed by + # the first extension module build. With Python 3.3, only + # the Distutils version of sysconfig is used for extension module + # builds, which happens earlier in the Distutils tests. This may + # cause the following tests to fail since no tests have caused + # the global version of sysconfig to call the customization yet. + # The solution for now is to simply skip this test in this case. + # The longer-term solution is to only have one version of sysconfig. + + import sysconfig as global_sysconfig + if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'): + return + self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), sysconfig.get_config_var('LDSHARED')) self.assertEqual(global_sysconfig.get_config_var('CC'), sysconfig.get_config_var('CC')) diff --git a/lib-python/3/distutils/tests/test_util.py b/lib-python/3/distutils/tests/test_util.py --- a/lib-python/3/distutils/tests/test_util.py +++ b/lib-python/3/distutils/tests/test_util.py @@ -13,6 +13,7 @@ from distutils.sysconfig import get_config_vars from distutils import sysconfig from distutils.tests import support +import _osx_support class UtilTestCase(support.EnvironGuard, unittest.TestCase): @@ -92,6 +93,7 @@ ('Darwin Kernel Version 8.11.1: ' 'Wed Oct 10 18:23:28 PDT 2007; ' 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386')) + _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3' From noreply at buildbot.pypy.org Sat Mar 15 01:48:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 15 Mar 2014 01:48:18 +0100 (CET) Subject: [pypy-commit] pypy stdlib-3.2.5: bump Message-ID: <20140315004818.4026B1C3058@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r69966:9c9dbbb65ba8 Date: 2014-03-14 17:46 -0700 http://bitbucket.org/pypy/pypy/changeset/9c9dbbb65ba8/ Log: bump diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -26,7 +26,7 @@ #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "3.2.3" +#define PY_VERSION "3.2.5" /* PyPy version as a string */ #define PYPY_VERSION "2.3.0-alpha0" diff --git a/pypy/module/sys/version.py 
b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -7,7 +7,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (3, 2, 3, "final", 0) +CPYTHON_VERSION = (3, 2, 5, "final", 0) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h From noreply at buildbot.pypy.org Sat Mar 15 07:54:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 07:54:04 +0100 (CET) Subject: [pypy-commit] cffi default: Use the word 'Note' to make it sound less ominous Message-ID: <20140315065404.11E9D1C03A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1481:ddbfc6f0300b Date: 2014-03-15 07:53 +0100 http://bitbucket.org/cffi/cffi/changeset/ddbfc6f0300b/ Log: Use the word 'Note' to make it sound less ominous diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -43,9 +43,9 @@ def ask_supports_thread(): if sys.platform == "darwin": - sys.stderr.write("OS/X: confusion between 'cc' versus 'gcc'") - sys.stderr.write(" (see issue 123)\n") - sys.stderr.write("will not use '__thread' in the C code\n") + sys.stderr.write("Note: will not use '__thread' in the C code\n") + sys.stderr.write("This is for OS/X-specific reasons: confusion " + "between 'cc' versus 'gcc' (see issue 123)\n") return import distutils.errors from distutils.ccompiler import new_compiler @@ -53,8 +53,8 @@ try: compiler.compile(['c/check__thread.c']) except distutils.errors.CompileError: - sys.stderr.write("the above error message can be safely ignored;\n") - sys.stderr.write("will not use '__thread' in the C code\n") + sys.stderr.write("Note: will not use '__thread' in the C code\n") + sys.stderr.write("The above error message can be safely ignored\n") else: define_macros.append(('USE__THREAD', None)) try: From noreply at buildbot.pypy.org Sat Mar 15 08:22:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 08:22:36 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: hg merge default Message-ID: <20140315072236.8E05E1C0483@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69967:fbedfb1494f1 Date: 2014-03-15 08:21 +0100 http://bitbucket.org/pypy/pypy/changeset/fbedfb1494f1/ Log: hg merge default diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -38,6 +38,7 @@ if sys.version_info[0] >= 3: StandardError = Exception + cmp = lambda x, y: (x > y) - (x < y) long = int xrange = range basestring = unicode = str diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,8 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # prevent linking with python27.lib + kwds["compile_extra"].append("/DPy_BUILD_CORE") elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -93,6 +93,11 @@ # possible aliasing). 
self.clear() self._lazy_setfield = None + if optheap.postponed_op: + for a in op.getarglist(): + if a is optheap.postponed_op.result: + optheap.emit_postponed_op() + break optheap.next_optimization.propagate_forward(op) if not can_cache: return @@ -179,6 +184,9 @@ def flush(self): self.force_all_lazy_setfields_and_arrayitems() + self.emit_postponed_op() + + def emit_postponed_op(self): if self.postponed_op: postponed_op = self.postponed_op self.postponed_op = None @@ -227,10 +235,7 @@ def emit_operation(self, op): self.emitting_operation(op) - if self.postponed_op: - postponed_op = self.postponed_op - self.postponed_op = None - self.next_optimization.propagate_forward(postponed_op) + self.emit_postponed_op() if (op.is_comparison() or op.getopnum() == rop.CALL_MAY_FORCE or op.is_ovf()): self.postponed_op = op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1660,6 +1660,16 @@ """ self.optimize_loop(ops, ops) + def test_setfield_int_eq_result(self): + # test that the setfield_gc does not end up before int_eq + ops = """ + [p1, i1, i2] + i3 = int_eq(i1, i2) + setfield_gc(p1, i3, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, ops) + def test_duplicate_setfield_aliasing(self): # a case where aliasing issues (and not enough cleverness) mean # that we fail to remove any setfield_gc @@ -5433,7 +5443,6 @@ jump(i0) """ self.optimize_loop(ops, expected) - - + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,14 +1,15 @@ import py from rpython.rlib.objectmodel import instantiate +from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt, TreeLoop +from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.optimizeopt import build_opt_chain from rpython.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, convert_old_style_to_targets) -from rpython.jit.metainterp.optimizeopt import build_opt_chain -from rpython.jit.metainterp.optimize import InvalidLoop -from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from rpython.jit.metainterp.history import TreeLoop -from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import \ + FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import rop, opname, oparity -from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData + def test_build_opt_chain(): def check(chain, expected_names): @@ -40,7 +41,6 @@ class BaseTestWithUnroll(BaseTest): - enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" def optimize_loop(self, ops, expected, expected_preamble=None, @@ -93,8 +93,8 @@ def raises(self, e, fn, *args): return py.test.raises(e, fn, *args).value + class OptimizeOptTest(BaseTestWithUnroll): - def setup_method(self, meth=None): class FailDescr(compile.ResumeGuardDescr): oparse = None @@ -130,7 +130,6 @@ self.namespace.pop('fdescr', None) self.namespace.pop('fdescr2', 
None) - def test_simple(self): ops = """ [] @@ -974,7 +973,6 @@ """ self.optimize_loop(ops, expected, preamble) - # ---------- def test_virtual_1(self): @@ -1252,7 +1250,6 @@ """ self.optimize_loop(ops, expected, preamble) - def test_virtual_constant_isnonnull(self): ops = """ [i0] @@ -2789,8 +2786,7 @@ p2 = new_with_vtable(ConstClass(node_vtable)) jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_2(self): ops = """ @@ -2801,8 +2797,7 @@ escape(p2) # prevent it from staying Virtual jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_3(self): ops = """ @@ -2824,8 +2819,7 @@ guard_value(p2, ConstPtr(myptr)) [] jump(p2) """ - exc = self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + exc = self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") if exc: assert "node" in exc.msg @@ -3151,7 +3145,6 @@ """ self.optimize_loop(ops, expected) - def test_int_and_or_with_zero(self): ops = """ [i0, i1] @@ -5107,7 +5100,6 @@ """ self.optimize_loop(ops, expected) - def test_division_nonneg(self): py.test.skip("harder") # this is how an app-level division turns into right now @@ -5444,7 +5436,6 @@ """ self.optimize_loop(ops, ops, ops) - def test_mul_ovf(self): ops = """ [i0, i1] @@ -5591,7 +5582,6 @@ def is_integer_bounded(self): return False - for n in ('inst_w_seq', 'inst_index', 'inst_w_list', 'inst_length', 'inst_start', 'inst_step'): self.namespace[n] = FakeDescr(n) @@ -5847,7 +5837,7 @@ self.optimize_loop(ops, optops, preamble) # check with replacing 'str' with 'unicode' everywhere def r(s): - return s.replace('str','unicode').replace('s"', 'u"') + return s.replace('str', 'unicode').replace('s"', 'u"') self.optimize_loop(r(ops), r(optops), r(preamble)) def test_newstr_1(self): @@ -6277,7 +6267,7 @@ if isinstance(value, calldescrtype): extra = value.get_extra_info() if (extra and isinstance(extra, effectinfotype) and - extra.oopspecindex == oopspecindex): + extra.oopspecindex == oopspecindex): # returns 0 for 'func' in this test return value, 0 raise AssertionError("not found: oopspecindex=%d" % @@ -7395,7 +7385,6 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_loopinvariant_constant_strgetitem(self): ops = """ [p0] @@ -7454,7 +7443,7 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_propagate_virtual_arryalen(self): + def test_propagate_virtual_arraylen(self): ops = """ [p0] p404 = new_array(2, descr=arraydescr) @@ -7831,7 +7820,6 @@ """ self.optimize_loop(ops, expected) - def test_setarrayitem_followed_by_arraycopy(self): ops = """ [p1, p2] @@ -8124,7 +8112,6 @@ """ self.optimize_loop(ops, expected) - def test_issue1080_infinitie_loop_simple(self): ops = """ [p69] @@ -8149,8 +8136,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_licm_boxed_opaque_getitem(self): ops = """ @@ -8225,8 +8211,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_cond_call_with_a_constant(self): ops = """ @@ -8253,6 +8238,16 @@ """ self.optimize_loop(ops, expected) + def test_hippyvm_unroll_bug(self): + ops = """ + [p0, i1, i2] + i3 = int_add(i1, 1) + i4 = int_eq(i3, i2) + setfield_gc(p0, 
i4, descr=valuedescr) + jump(p0, i3, i2) + """ + self.optimize_loop(ops, ops) + + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass - diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -369,32 +369,21 @@ for rule in rules: m.rule(*rule) - objects = ' $(OBJECTS)' - create_obj_response_file = [] - if len(' '.join(rel_ofiles)) > 4000: - # cmd.exe has a limit of ~4000 characters before a command line is too long. - # Use a response file instead, at the cost of making the Makefile very ugly. - for i in range(len(rel_ofiles) - 1): - create_obj_response_file.append('echo %s >> obj_names.rsp' % \ - rel_ofiles[i]) - # use cmd /c for the last one so that the file is flushed - create_obj_response_file.append('cmd /c echo %s >> obj_names.rsp' % \ - rel_ofiles[-1]) - objects = ' @obj_names.rsp' if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ + ' $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) else: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST /MANIFESTFILE:$*.manifest', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST' + \ + ' /MANIFESTFILE:$*.manifest @<<\n$(OBJECTS)\n<<', 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) if shared: From noreply at buildbot.pypy.org Sat Mar 15 08:30:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 08:30:55 +0100 (CET) Subject: [pypy-commit] stmgc default: Should not have an effect, but better safe than (rarely) sorry Message-ID: <20140315073055.E664E1C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1016:7f6e7192dd38 Date: 2014-03-15 08:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/7f6e7192dd38/ Log: Should not have an effect, but better safe than (rarely) sorry diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -172,8 +172,14 @@ kind! The shadowstack may not be correct here. It should not end in a deadlock, because the target thread is, in principle, guaranteed to call abort_with_mutex() - very soon. + very soon. Just to be on the safe side, make it really + impossible for the target thread to later enter the same + cond_wait(C_ABORTED) (and thus wait, possibly for us, + ending in a deadlock): check again must_abort() first. 
*/ + if (must_abort()) + abort_with_mutex(); + dprintf(("contention: wait C_ABORTED...\n")); cond_wait(C_ABORTED); dprintf(("contention: done\n")); From noreply at buildbot.pypy.org Sat Mar 15 09:18:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 09:18:48 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/7f6e7192dd38 Message-ID: <20140315081848.B3C451C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69968:1b664888133d Date: 2014-03-15 09:18 +0100 http://bitbucket.org/pypy/pypy/changeset/1b664888133d/ Log: import stmgc/7f6e7192dd38 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -3c39b8d8e184 +7f6e7192dd38 diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -173,8 +173,14 @@ kind! The shadowstack may not be correct here. It should not end in a deadlock, because the target thread is, in principle, guaranteed to call abort_with_mutex() - very soon. + very soon. Just to be on the safe side, make it really + impossible for the target thread to later enter the same + cond_wait(C_ABORTED) (and thus wait, possibly for us, + ending in a deadlock): check again must_abort() first. */ + if (must_abort()) + abort_with_mutex(); + dprintf(("contention: wait C_ABORTED...\n")); cond_wait(C_ABORTED); dprintf(("contention: done\n")); diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -12,7 +12,7 @@ void _stm_write_slowpath(object_t *obj) { - assert(_running_transaction()); + assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); /* is this an object from the same transaction, outside the nursery? */ diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -205,7 +205,7 @@ } static bool _is_tl_registered(stm_thread_local_t *tl); -static bool _running_transaction(void); +static bool _seems_to_be_running_transaction(void); static void teardown_core(void); static void abort_with_mutex(void) __attribute__((noreturn)); diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -214,7 +214,7 @@ } __attribute__((unused)) -static bool _running_transaction(void) +static bool _seems_to_be_running_transaction(void) { return (STM_SEGMENT->running_thread != NULL); } @@ -302,6 +302,7 @@ static void enter_safe_point_if_requested(void) { + assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { if (must_abort()) diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -148,6 +148,7 @@ stm_write() is called, or immediately after getting the object from stm_allocate(), as long as the rules above are respected. 
*/ +__attribute__((always_inline)) static inline void stm_read(object_t *obj) { ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = @@ -159,6 +160,7 @@ the next one, then stm_write() needs to be called again. It is not necessary to call it immediately after stm_allocate(). */ +__attribute__((always_inline)) static inline void stm_write(object_t *obj) { if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) @@ -178,6 +180,7 @@ of 8 and at least 16. In the fast-path, this is inlined to just a few assembler instructions. */ +__attribute__((always_inline)) static inline object_t *stm_allocate(ssize_t size_rounded_up) { OPT_ASSERT(size_rounded_up >= 16); From noreply at buildbot.pypy.org Sat Mar 15 09:26:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 09:26:21 +0100 (CET) Subject: [pypy-commit] pypy default: Fix "return 0" when the return type is some pointer Message-ID: <20140315082621.B1E8E1C31BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69969:4b77b625bdcc Date: 2014-03-15 09:24 +0100 http://bitbucket.org/pypy/pypy/changeset/4b77b625bdcc/ Log: Fix "return 0" when the return type is some pointer diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -213,7 +213,7 @@ if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return 0 + return lltype.nullptr(PyGILState_STATE.TO) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): From noreply at buildbot.pypy.org Sat Mar 15 09:26:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 09:26:23 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140315082623.162FB1C31BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69970:3d19219df4dd Date: 2014-03-15 09:25 +0100 http://bitbucket.org/pypy/pypy/changeset/3d19219df4dd/ Log: merge heads diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -38,6 +38,7 @@ if sys.version_info[0] >= 3: StandardError = Exception + cmp = lambda x, y: (x > y) - (x < y) long = int xrange = range basestring = unicode = str diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,8 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # prevent linking with python27.lib + kwds["compile_extra"].append("/DPy_BUILD_CORE") elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -368,13 +368,10 @@ class ConcreteArray(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE)): - null_storage = lltype.nullptr(RAW_STORAGE) + if storage == lltype.nullptr(RAW_STORAGE): + storage = dtype.itemtype.malloc(support.product(shape) * dtype.elsize) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, - null_storage) - if storage == lltype.nullptr(RAW_STORAGE): - self.storage = dtype.itemtype.malloc(self.size) - else: - self.storage = storage + storage) def __del__(self): 
free_raw_storage(self.storage, track_allocation=False) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -93,6 +93,11 @@ # possible aliasing). self.clear() self._lazy_setfield = None + if optheap.postponed_op: + for a in op.getarglist(): + if a is optheap.postponed_op.result: + optheap.emit_postponed_op() + break optheap.next_optimization.propagate_forward(op) if not can_cache: return @@ -179,6 +184,9 @@ def flush(self): self.force_all_lazy_setfields_and_arrayitems() + self.emit_postponed_op() + + def emit_postponed_op(self): if self.postponed_op: postponed_op = self.postponed_op self.postponed_op = None @@ -227,10 +235,7 @@ def emit_operation(self, op): self.emitting_operation(op) - if self.postponed_op: - postponed_op = self.postponed_op - self.postponed_op = None - self.next_optimization.propagate_forward(postponed_op) + self.emit_postponed_op() if (op.is_comparison() or op.getopnum() == rop.CALL_MAY_FORCE or op.is_ovf()): self.postponed_op = op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1660,6 +1660,16 @@ """ self.optimize_loop(ops, ops) + def test_setfield_int_eq_result(self): + # test that the setfield_gc does not end up before int_eq + ops = """ + [p1, i1, i2] + i3 = int_eq(i1, i2) + setfield_gc(p1, i3, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, ops) + def test_duplicate_setfield_aliasing(self): # a case where aliasing issues (and not enough cleverness) mean # that we fail to remove any setfield_gc @@ -5433,7 +5443,6 @@ jump(i0) """ self.optimize_loop(ops, expected) - - + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,14 +1,15 @@ import py from rpython.rlib.objectmodel import instantiate +from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt, TreeLoop +from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.optimizeopt import build_opt_chain from rpython.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, convert_old_style_to_targets) -from rpython.jit.metainterp.optimizeopt import build_opt_chain -from rpython.jit.metainterp.optimize import InvalidLoop -from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from rpython.jit.metainterp.history import TreeLoop -from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import \ + FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import rop, opname, oparity -from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData + def test_build_opt_chain(): def check(chain, expected_names): @@ -40,7 +41,6 @@ class BaseTestWithUnroll(BaseTest): - enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" def optimize_loop(self, ops, expected, expected_preamble=None, @@ -93,8 +93,8 @@ def 
raises(self, e, fn, *args): return py.test.raises(e, fn, *args).value + class OptimizeOptTest(BaseTestWithUnroll): - def setup_method(self, meth=None): class FailDescr(compile.ResumeGuardDescr): oparse = None @@ -130,7 +130,6 @@ self.namespace.pop('fdescr', None) self.namespace.pop('fdescr2', None) - def test_simple(self): ops = """ [] @@ -974,7 +973,6 @@ """ self.optimize_loop(ops, expected, preamble) - # ---------- def test_virtual_1(self): @@ -1252,7 +1250,6 @@ """ self.optimize_loop(ops, expected, preamble) - def test_virtual_constant_isnonnull(self): ops = """ [i0] @@ -2789,8 +2786,7 @@ p2 = new_with_vtable(ConstClass(node_vtable)) jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_2(self): ops = """ @@ -2801,8 +2797,7 @@ escape(p2) # prevent it from staying Virtual jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_3(self): ops = """ @@ -2824,8 +2819,7 @@ guard_value(p2, ConstPtr(myptr)) [] jump(p2) """ - exc = self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + exc = self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") if exc: assert "node" in exc.msg @@ -3151,7 +3145,6 @@ """ self.optimize_loop(ops, expected) - def test_int_and_or_with_zero(self): ops = """ [i0, i1] @@ -5107,7 +5100,6 @@ """ self.optimize_loop(ops, expected) - def test_division_nonneg(self): py.test.skip("harder") # this is how an app-level division turns into right now @@ -5444,7 +5436,6 @@ """ self.optimize_loop(ops, ops, ops) - def test_mul_ovf(self): ops = """ [i0, i1] @@ -5591,7 +5582,6 @@ def is_integer_bounded(self): return False - for n in ('inst_w_seq', 'inst_index', 'inst_w_list', 'inst_length', 'inst_start', 'inst_step'): self.namespace[n] = FakeDescr(n) @@ -5847,7 +5837,7 @@ self.optimize_loop(ops, optops, preamble) # check with replacing 'str' with 'unicode' everywhere def r(s): - return s.replace('str','unicode').replace('s"', 'u"') + return s.replace('str', 'unicode').replace('s"', 'u"') self.optimize_loop(r(ops), r(optops), r(preamble)) def test_newstr_1(self): @@ -6277,7 +6267,7 @@ if isinstance(value, calldescrtype): extra = value.get_extra_info() if (extra and isinstance(extra, effectinfotype) and - extra.oopspecindex == oopspecindex): + extra.oopspecindex == oopspecindex): # returns 0 for 'func' in this test return value, 0 raise AssertionError("not found: oopspecindex=%d" % @@ -7395,7 +7385,6 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_loopinvariant_constant_strgetitem(self): ops = """ [p0] @@ -7454,7 +7443,7 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_propagate_virtual_arryalen(self): + def test_propagate_virtual_arraylen(self): ops = """ [p0] p404 = new_array(2, descr=arraydescr) @@ -7831,7 +7820,6 @@ """ self.optimize_loop(ops, expected) - def test_setarrayitem_followed_by_arraycopy(self): ops = """ [p1, p2] @@ -8124,7 +8112,6 @@ """ self.optimize_loop(ops, expected) - def test_issue1080_infinitie_loop_simple(self): ops = """ [p69] @@ -8149,8 +8136,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_licm_boxed_opaque_getitem(self): ops = """ @@ -8225,8 +8211,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + 
self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_cond_call_with_a_constant(self): ops = """ @@ -8253,6 +8238,16 @@ """ self.optimize_loop(ops, expected) + def test_hippyvm_unroll_bug(self): + ops = """ + [p0, i1, i2] + i3 = int_add(i1, 1) + i4 = int_eq(i3, i2) + setfield_gc(p0, i4, descr=valuedescr) + jump(p0, i3, i2) + """ + self.optimize_loop(ops, ops) + + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass - diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -369,32 +369,21 @@ for rule in rules: m.rule(*rule) - objects = ' $(OBJECTS)' - create_obj_response_file = [] - if len(' '.join(rel_ofiles)) > 4000: - # cmd.exe has a limit of ~4000 characters before a command line is too long. - # Use a response file instead, at the cost of making the Makefile very ugly. - for i in range(len(rel_ofiles) - 1): - create_obj_response_file.append('echo %s >> obj_names.rsp' % \ - rel_ofiles[i]) - # use cmd /c for the last one so that the file is flushed - create_obj_response_file.append('cmd /c echo %s >> obj_names.rsp' % \ - rel_ofiles[-1]) - objects = ' @obj_names.rsp' if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ + ' $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) else: m.rule('$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST /MANIFESTFILE:$*.manifest', + [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST' + \ + ' /MANIFESTFILE:$*.manifest @<<\n$(OBJECTS)\n<<', 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(TARGET)', '$(OBJECTS)', - create_obj_response_file + [\ - '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS)', + [ '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + \ + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', ]) if shared: From noreply at buildbot.pypy.org Sat Mar 15 09:27:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 09:27:34 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: hg merge default Message-ID: <20140315082734.048401C31BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69971:6983332fc29c Date: 2014-03-15 09:27 +0100 http://bitbucket.org/pypy/pypy/changeset/6983332fc29c/ Log: hg merge default diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -213,7 +213,7 @@ if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return 0 + return lltype.nullptr(PyGILState_STATE.TO) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): From noreply at buildbot.pypy.org Sat Mar 15 11:11:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 11:11:21 +0100 (CET) Subject: [pypy-commit] stmgc default: shadowstack: improve the aliasing analysis in the C code. 
Message-ID: <20140315101121.CF6BA1C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1017:ddbc16971682 Date: 2014-03-15 11:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/ddbc16971682/ Log: shadowstack: improve the aliasing analysis in the C code. diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -150,7 +150,7 @@ /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. */ - object_t **shadowstack_at_start_of_transaction; + struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; /* For debugging */ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -397,11 +397,11 @@ if (must_be_zero == (segment_base == get_segment_base(0))) { - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - assert(*current != (object_t *)-1); - mark_visit_object(*current, segment_base); + assert(current->ss != (object_t *)-1); + mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -158,11 +158,11 @@ static void collect_roots_in_nursery(void) { stm_thread_local_t *tl = STM_SEGMENT->running_thread; - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - assert(*current != (object_t *)-1); - minor_trace_if_young(current); + assert(current->ss != (object_t *)-1); + minor_trace_if_young(¤t->ss); } minor_trace_if_young(&tl->thread_local_obj); } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -118,7 +118,8 @@ void _init_shadow_stack(stm_thread_local_t *tl) { - object_t **s = (object_t **)malloc(SHADOW_STACK_SIZE * sizeof(object_t *)); + struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) + malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); assert(s); tl->shadowstack = s; tl->shadowstack_base = s; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -48,9 +48,15 @@ }; #define STM_SEGMENT ((stm_segment_info_t *)4352) +struct stm_shadowentry_s { + /* Like stm_read_marker_s, this is a struct to enable better + aliasing analysis in the C code. */ + object_t *ss; +}; + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ - object_t **shadowstack, **shadowstack_base; + struct stm_shadowentry_s *shadowstack, *shadowstack_base; /* a generic optional thread-local object */ object_t *thread_local_obj; /* in case this thread runs a transaction that aborts, @@ -218,9 +224,9 @@ /* Push and pop roots from/to the shadow stack. Only allowed inside transaction. 
*/ -#define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) -#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) -#define STM_POP_ROOT_RET(tl) (*(--(tl).shadowstack)) +#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) +#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) +#define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) /* Every thread needs to have a corresponding stm_thread_local_t diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -12,8 +12,12 @@ #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... +struct stm_shadowentry_s { + object_t *ss; +}; + typedef struct { - object_t **shadowstack, **shadowstack_base; + struct stm_shadowentry_s *shadowstack, *shadowstack_base; object_t *thread_local_obj; char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; @@ -389,7 +393,7 @@ def _allocate_thread_local(): tl = ffi.new("stm_thread_local_t *") - ss = ffi.new("object_t *[]", SHADOWSTACK_LENGTH) + ss = ffi.new("struct stm_shadowentry_s[]", SHADOWSTACK_LENGTH) _keepalive[tl] = ss tl.shadowstack = ss tl.shadowstack_base = ss @@ -462,7 +466,7 @@ tl = self.tls[self.current_thread] curlength = tl.shadowstack - tl.shadowstack_base assert 0 <= curlength < SHADOWSTACK_LENGTH - tl.shadowstack[0] = ffi.cast("object_t *", o) + tl.shadowstack[0].ss = ffi.cast("object_t *", o) tl.shadowstack += 1 def pop_root(self): @@ -472,7 +476,7 @@ raise EmptyStack assert 0 < curlength <= SHADOWSTACK_LENGTH tl.shadowstack -= 1 - return ffi.cast("object_t *", tl.shadowstack[0]) + return ffi.cast("object_t *", tl.shadowstack[0].ss) def push_root_no_gc(self): "Pushes an invalid object, to crash in case the GC is called" From noreply at buildbot.pypy.org Sat Mar 15 11:20:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 11:20:34 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/ddbc16971682 Message-ID: <20140315102034.47F421C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69972:1d0b870195e7 Date: 2014-03-15 11:11 +0100 http://bitbucket.org/pypy/pypy/changeset/1d0b870195e7/ Log: import stmgc/ddbc16971682 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -7f6e7192dd38 +ddbc16971682 diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -151,7 +151,7 @@ /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. 
*/ - object_t **shadowstack_at_start_of_transaction; + struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; /* For debugging */ diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -398,11 +398,11 @@ if (must_be_zero == (segment_base == get_segment_base(0))) { - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - assert(*current != (object_t *)-1); - mark_visit_object(*current, segment_base); + assert(current->ss != (object_t *)-1); + mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); } diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -159,11 +159,11 @@ static void collect_roots_in_nursery(void) { stm_thread_local_t *tl = STM_SEGMENT->running_thread; - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - assert(*current != (object_t *)-1); - minor_trace_if_young(current); + assert(current->ss != (object_t *)-1); + minor_trace_if_young(¤t->ss); } minor_trace_if_young(&tl->thread_local_obj); } diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -119,7 +119,8 @@ void _init_shadow_stack(stm_thread_local_t *tl) { - object_t **s = (object_t **)malloc(SHADOW_STACK_SIZE * sizeof(object_t *)); + struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) + malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); assert(s); tl->shadowstack = s; tl->shadowstack_base = s; diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -49,9 +49,15 @@ }; #define STM_SEGMENT ((stm_segment_info_t *)4352) +struct stm_shadowentry_s { + /* Like stm_read_marker_s, this is a struct to enable better + aliasing analysis in the C code. */ + object_t *ss; +}; + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ - object_t **shadowstack, **shadowstack_base; + struct stm_shadowentry_s *shadowstack, *shadowstack_base; /* a generic optional thread-local object */ object_t *thread_local_obj; /* in case this thread runs a transaction that aborts, @@ -219,9 +225,9 @@ /* Push and pop roots from/to the shadow stack. Only allowed inside transaction. 
*/ -#define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) -#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) -#define STM_POP_ROOT_RET(tl) (*(--(tl).shadowstack)) +#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) +#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) +#define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) /* Every thread needs to have a corresponding stm_thread_local_t diff --git a/rpython/translator/stm/test/richards.py b/rpython/translator/stm/test/richards.py --- a/rpython/translator/stm/test/richards.py +++ b/rpython/translator/stm/test/richards.py @@ -433,7 +433,7 @@ if __name__ == '__main__': import sys - max_num_threads = 5 + max_num_threads = 2 if len(sys.argv) > 1: iterations = int(sys.argv[1]) if len(sys.argv) > 2: From noreply at buildbot.pypy.org Sat Mar 15 11:21:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 11:21:24 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Produce a breakdown of where the slow-down occurs Message-ID: <20140315102124.1C2CA1C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69973:e96443dab32f Date: 2014-03-15 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/e96443dab32f/ Log: Produce a breakdown of where the slow-down occurs diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,3 +1,32 @@ + +1b664888133d (March 15, 2014): the overhead of a non-JIT STM, when +compared with a non-JIT plain PyPy, is measured to be 54% in a +single-threaded trivial benchmark. A tentative break-down of this figure: + +* 15% are from stm_write() on writing non-GC pointers into GC objects + +* 7% are from stm_write() on writing a constant GC pointer (likely null) + (in a regular pypy, this doesn't emit a write_barrier) + +* 14% are from stm_read() + +* 3% (in this benchmark) is just a slower startup time + +* 6% where removed soon afterwards by ddbc16971682 + +* the rest: ~9% from unknown other places (may include accessing + prebuilt GC objects, which requires an indirection) + + + + + +=============================================================================== +=========== the rest is from stmgc-c4 =================================== +=============================================================================== + + + ------------------------------------------------------------ POSSIBLE BUG: From noreply at buildbot.pypy.org Sat Mar 15 11:26:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 11:26:58 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Update Message-ID: <20140315102658.24AD31C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69974:05e94e7b9235 Date: 2014-03-15 11:26 +0100 http://bitbucket.org/pypy/pypy/changeset/05e94e7b9235/ Log: Update diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -17,6 +17,9 @@ * the rest: ~9% from unknown other places (may include accessing prebuilt GC objects, which requires an indirection) +UPDATE: with ddbc16971682 the figure seems to be: only 38% slower. +Assuming that all other points stayed at the same overhead, it would +perfectly explain the slow-down. 
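For readers following the figures above: the stm_read() and stm_write() entries
refer to the inline barriers declared in c7/stmgc.h, which every access to a GC
object in the generated C code has to go through.  The snippet below is only an
illustrative sketch of how user code is expected to call them; the node_t type
and its fields are invented for the example and are not taken from the PyPy or
stmgc sources.

    #include "stmgc.h"

    /* hypothetical application object, laid out like the stmgc demo code */
    typedef TLPREFIX struct node_s node_t;
    struct node_s {
        struct object_s hdr;     /* mandatory stmgc object header */
        long value;              /* a plain (non-GC) field */
        node_t *next;            /* a GC pointer field */
    };

    long get_value(node_t *n)
    {
        stm_read((object_t *)n);    /* read barrier: the "14%" item */
        return n->value;
    }

    void set_value(node_t *n, long x)
    {
        stm_write((object_t *)n);   /* write barrier before storing a
                                       non-GC value: the "15%" item */
        n->value = x;
    }

    void clear_next(node_t *n)
    {
        stm_write((object_t *)n);   /* also required before storing a GC
                                       pointer, even a constant NULL:
                                       the "7%" item */
        n->next = NULL;
    }

Every object access in the non-JIT interpreter pays for one such barrier, which
is why making stm_write()/stm_read() cheaper, or letting the JIT emit fewer of
them, is where most of the 54% (now 38%) overhead is expected to be recovered.
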
From noreply at buildbot.pypy.org Sat Mar 15 12:08:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 12:08:11 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add a blog draft Message-ID: <20140315110811.553F51C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5163:61f1eb9baa3a Date: 2014-03-15 12:08 +0100 http://bitbucket.org/pypy/extradoc/changeset/61f1eb9baa3a/ Log: Add a blog draft diff --git a/blog/draft/stm-mar2014.txt b/blog/draft/stm-mar2014.txt new file mode 100644 --- /dev/null +++ b/blog/draft/stm-mar2014.txt @@ -0,0 +1,29 @@ +Hi all, + +Here is one +of the first full PyPy's compiled with the new StmGC-c7 +library. It has no JIT so far, but it runs some small +single-threaded benchmarks by taking around 40% more time than a +corresponding non-STM, no-JIT version of PyPy. It scales --- up to two +threads only, which is the hard-coded maximum so far in the c7 code. +But the scaling looks perfect in these small benchmarks without +conflict: starting two threads each running a copy of the benchmark +takes almost exactly the same amount of total time, simply using two +cores. + +Feel free to try it! It is not actually useful so far, because it is +limited to two cores and CPython is something like 2.5x faster. One of +the important next steps is to re-enable the JIT. Based on our current +understanding the "40%" figure, we can probably reduce it with +enough efforts; but also, the JIT should be able to easily produce +machine code that suffers a bit less than the interpreter from these +effects. This seems to mean that we're looking at 20%-ish slow-downs +for the future PyPy-STM-JIT. + +Interesting times :-) + + +Armin (as well as Remi for the work) From noreply at buildbot.pypy.org Sat Mar 15 17:28:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 17:28:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: A branch in which to try to support more than 2 segments Message-ID: <20140315162810.76E451C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1018:ce2390ac80bf Date: 2014-03-15 17:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/ce2390ac80bf/ Log: A branch in which to try to support more than 2 segments From noreply at buildbot.pypy.org Sat Mar 15 17:28:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 17:28:11 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Kill unused stuff Message-ID: <20140315162811.967EF1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1019:1eef635359f7 Date: 2014-03-15 17:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/1eef635359f7/ Log: Kill unused stuff diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -59,7 +59,7 @@ the common case. Otherwise, we need to compute it based on its location and size. 
*/ if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { - pages_privatize(first_page, 1, true); + pages_privatize(first_page, 1); } else { char *realobj; @@ -73,7 +73,7 @@ /* that's the page *following* the last page with the object */ end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - pages_privatize(first_page, end_page - first_page, true); + pages_privatize(first_page, end_page - first_page); } } else if (write_locks[lock_idx] == lock_num) { diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -133,7 +133,7 @@ } #endif -static void privatize_range(uintptr_t pagenum, uintptr_t count, bool full) +static void privatize_range(uintptr_t pagenum, uintptr_t count) { ssize_t pgoff1 = pagenum; ssize_t pgoff2 = pagenum + NB_PAGES; @@ -146,29 +146,16 @@ memset(flag_page_private + pagenum, REMAPPING_PAGE, count); d_remap_file_pages(localpg, count * 4096, pgoff2); uintptr_t i; - if (full) { - for (i = 0; i < count; i++) { - pagecopy(localpg + 4096 * i, otherpg + 4096 * i); - } - } - else { - pagecopy(localpg, otherpg); - if (count > 1) - pagecopy(localpg + 4096 * (count-1), otherpg + 4096 * (count-1)); + for (i = 0; i < count; i++) { + pagecopy(localpg + 4096 * i, otherpg + 4096 * i); } write_fence(); memset(flag_page_private + pagenum, PRIVATE_PAGE, count); increment_total_allocated(4096 * count); } -static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) +static void _pages_privatize(uintptr_t pagenum, uintptr_t count) { - /* narrow the range of pages to privatize from the end: */ - while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { - if (!--count) - return; - } - mutex_pages_lock(); uintptr_t page_start_range = pagenum; @@ -179,7 +166,7 @@ if (prev == PRIVATE_PAGE) { if (pagenum > page_start_range) { privatize_range(page_start_range, - pagenum - page_start_range, full); + pagenum - page_start_range); } page_start_range = pagenum + 1; } @@ -190,7 +177,7 @@ if (pagenum > page_start_range) { privatize_range(page_start_range, - pagenum - page_start_range, full); + pagenum - page_start_range); } mutex_pages_unlock(); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -21,9 +21,8 @@ static uint8_t flag_page_private[NB_PAGES]; -static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full); +static void _pages_privatize(uintptr_t pagenum, uintptr_t count); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); -//static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count); static void mutex_pages_lock(void); static void mutex_pages_unlock(void); @@ -32,8 +31,7 @@ static void force_major_collection_request(void); static void reset_major_collection_requested(void); -inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, - bool full) { +inline static void pages_privatize(uintptr_t pagenum, uintptr_t count) { /* This is written a bit carefully so that a call with a constant count == 1 will turn this loop into just one "if". 
*/ while (flag_page_private[pagenum] == PRIVATE_PAGE) { @@ -42,7 +40,5 @@ } pagenum++; } - _pages_privatize(pagenum, count, full); + _pages_privatize(pagenum, count); } - -/* static bool is_fully_in_shared_pages(object_t *obj); */ From noreply at buildbot.pypy.org Sat Mar 15 17:28:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 17:28:12 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Disable this too Message-ID: <20140315162812.A763F1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1020:f8c3c7960895 Date: 2014-03-15 17:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/f8c3c7960895/ Log: Disable this too diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -193,6 +193,7 @@ /************************************************************/ +#if 0 static inline void mark_single_flag_private(uintptr_t pagenum) { if (flag_page_private[pagenum] == PRIVATE_PAGE) { @@ -314,6 +315,7 @@ (uninitialized_page_stop - stm_object_pages) / 4096UL, NB_PAGES); } +#endif /************************************************************/ @@ -346,11 +348,13 @@ while (1) { +#if 0 /* first, if we're not seeing segment 0, we must change the flags in flag_page_private[] from PRIVATE_PAGE to SEGMENT1_PAGE, which will mean "can't re-share" */ if (segment_base != stm_object_pages && RESHARE_PAGES) mark_flag_page_private(obj, segment_base); +#endif /* trace into the object (the version from 'segment_base') */ struct object_s *realobj = @@ -549,8 +553,10 @@ /* sweeping */ mutex_pages_lock(); +#if 0 if (RESHARE_PAGES) major_reshare_pages(); +#endif sweep_large_objects(); //sweep_uniform_pages(); mutex_pages_unlock(); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -13,10 +13,6 @@ /* Page is private for each segment. */ PRIVATE_PAGE, - - /* gcpage.c: page contains objects that have been traced in the - segment > 0 */ - SEGMENT1_PAGE, }; static uint8_t flag_page_private[NB_PAGES]; From noreply at buildbot.pypy.org Sat Mar 15 17:28:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 17:28:13 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix for a remaining inevitable transaction Message-ID: <20140315162813.BC90B1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1021:2203a869efb3 Date: 2014-03-15 17:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/2203a869efb3/ Log: Fix for a remaining inevitable transaction diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -413,7 +413,10 @@ if lib._stm_in_transaction(tl): if self.current_thread != n: self.switch(n) - self.abort_transaction() + if lib.stm_is_inevitable(): + self.commit_transaction() # must succeed! 
+ else: + self.abort_transaction() for tl in self.tls: lib.stm_unregister_thread_local(tl) lib.stm_teardown() From noreply at buildbot.pypy.org Sat Mar 15 17:28:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 17:28:14 +0100 (CET) Subject: [pypy-commit] stmgc default: Skip this test for now Message-ID: <20140315162814.C4E441C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1022:6b3a3103ccc4 Date: 2014-03-15 17:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/6b3a3103ccc4/ Log: Skip this test for now diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -146,6 +146,7 @@ class TestIsolation(BaseTest): def test_not_break(self): + py.test.xfail("known to fail") lpold = stm_allocate_old_refs(1) self.start_transaction() From noreply at buildbot.pypy.org Sat Mar 15 17:48:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 17:48:12 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add a paragraph about how to try it Message-ID: <20140315164812.4E86E1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5164:5fab61cd53f6 Date: 2014-03-15 17:48 +0100 http://bitbucket.org/pypy/extradoc/changeset/5fab61cd53f6/ Log: Add a paragraph about how to try it diff --git a/blog/draft/stm-mar2014.txt b/blog/draft/stm-mar2014.txt --- a/blog/draft/stm-mar2014.txt +++ b/blog/draft/stm-mar2014.txt @@ -17,7 +17,7 @@ limited to two cores and CPython is something like 2.5x faster. One of the important next steps is to re-enable the JIT. Based on our current -understanding the "40%" figure, we can probably reduce it with +understanding of the "40%" figure, we can probably reduce it with enough efforts; but also, the JIT should be able to easily produce machine code that suffers a bit less than the interpreter from these effects. This seems to mean that we're looking at 20%-ish slow-downs @@ -25,5 +25,15 @@ Interesting times :-) +For reference, this is what you get by downloading the +PyPy binary linked above: a Linux 64 binary (Ubuntu 12.04) that +should behave mostly like a regular PyPy. (One main missing feature is +that destructors are never called.) It uses two cores, but obviously +only if the Python program you run is multithreaded. The only new +built-in feature is with __pypy__.thread.atomic: this gives +you a way to enforce that a block of code runs "atomically", which means +without any operation from any other thread randomly interleaved. -Armin (as well as Remi for the work) + +Armin & Remi From noreply at buildbot.pypy.org Sat Mar 15 17:50:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 17:50:04 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: A goodbye-like sentence Message-ID: <20140315165004.01D491C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5165:c352d811ef4a Date: 2014-03-15 17:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/c352d811ef4a/ Log: A goodbye-like sentence diff --git a/blog/draft/stm-mar2014.txt b/blog/draft/stm-mar2014.txt --- a/blog/draft/stm-mar2014.txt +++ b/blog/draft/stm-mar2014.txt @@ -35,5 +35,7 @@ you a way to enforce that a block of code runs "atomically", which means without any operation from any other thread randomly interleaved. +Stay tuned for more! 
+ Armin & Remi From noreply at buildbot.pypy.org Sat Mar 15 18:00:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 18:00:21 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Yet another small paragraph Message-ID: <20140315170021.0E1371C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5166:f2fa60f6ed2b Date: 2014-03-15 18:00 +0100 http://bitbucket.org/pypy/extradoc/changeset/f2fa60f6ed2b/ Log: Yet another small paragraph diff --git a/blog/draft/stm-mar2014.txt b/blog/draft/stm-mar2014.txt --- a/blog/draft/stm-mar2014.txt +++ b/blog/draft/stm-mar2014.txt @@ -35,6 +35,11 @@ you a way to enforce that a block of code runs "atomically", which means without any operation from any other thread randomly interleaved. +If you want to translate it yourself, you need a trunk version of clang +with three patches applied. That's the number of bugs that we couldn't +find workarounds for, not the total number of bugs we found by (ab)using +the address_space feature... + Stay tuned for more! From noreply at buildbot.pypy.org Sat Mar 15 18:18:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 18:18:25 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Mention multiple stm_writes in a row Message-ID: <20140315171825.5EA481C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69975:9cbbb042ad60 Date: 2014-03-15 18:17 +0100 http://bitbucket.org/pypy/pypy/changeset/9cbbb042ad60/ Log: Mention multiple stm_writes in a row diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -21,6 +21,14 @@ Assuming that all other points stayed at the same overhead, it would perfectly explain the slow-down. +------------------------------------------------------------ + +clang doesn't optimize multiple stm_write() in a row (unlike GCC). +Optimize them manually... + +------------------------------------------------------------ + + From noreply at buildbot.pypy.org Sat Mar 15 18:42:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 18:42:16 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Write a long comment about the new model I'm aiming for. Message-ID: <20140315174216.C6B6B1C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1023:fa8bfb4eb410 Date: 2014-03-15 18:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/fa8bfb4eb410/ Log: Write a long comment about the new model I'm aiming for. diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -1,21 +1,32 @@ -enum /* flag_page_private */ { - /* The page is not in use. Assume that each segment sees its own copy. */ - FREE_PAGE=0, +/* For every page, 'num_segments_sharing_page' stores a number that + counts the number of segments that share the page. If 0, the page is + not used so far. - /* The page is shared by all segments. Each segment sees the same - physical page (the one that is within the segment 0 mmap address). */ - SHARED_PAGE, + When the page is first initialized, 'num_segments_sharing_page' is + set to NB_SEGMENTS. When later a segment wants a private copy, it + looks first in its own 'private_page_mapping' tree, which maps shared + pages to private copies. If not found, then it proceeds like this: - /* For only one range of pages at a time, around the call to - remap_file_pages() that un-shares the pages (SHARED -> PRIVATE). 
*/ - REMAPPING_PAGE, + If 'num_segments_sharing_page' is greater than 1, then it is + decremented and a private copy of the page is made. - /* Page is private for each segment. */ - PRIVATE_PAGE, -}; + If 'num_segments_sharing_page' is equal to 1, then we know we are the + last segment that sees this "shared" copy, and so it is actually not + shared with anybody else --- i.e. it is private already. -static uint8_t flag_page_private[NB_PAGES]; + The shared copy of a page is stored in the mmap at the file offset + corresponding to the segment 0 offset (with all other segments + remapping to the segment 0 offset). Private copies are made in the + offset from segment 1 (and if full, more segments afterwards), + picking file offsets that are simply the next free ones. This is + probably good for long-term memory usage: a major collection looks + for pages that are no-longer-used private copies of some shared page, + and discard them, remapping the address to the shared page. The + pages thus freed are recorded into a free list, and can be reused as + the private copies of the following (unrelated) pages. +*/ +static uint8_t num_segments_sharing_page[NB_PAGES]; static void _pages_privatize(uintptr_t pagenum, uintptr_t count); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); From noreply at buildbot.pypy.org Sat Mar 15 19:01:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 19:01:24 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Complete Message-ID: <20140315180124.8F0CB1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1024:4ab1f6857a09 Date: 2014-03-15 18:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/4ab1f6857a09/ Log: Complete diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -15,16 +15,31 @@ last segment that sees this "shared" copy, and so it is actually not shared with anybody else --- i.e. it is private already. + (This means that 'num_segments_sharing_page' is basically just an + optimization. Without it, we might need 'NB_SEGMENTS + 1' copies of + the same data; with it, we can bound the number to 'NB_SEGMENTS'. + This is probably important if NB_SEGMENTS is very small.) + The shared copy of a page is stored in the mmap at the file offset corresponding to the segment 0 offset (with all other segments remapping to the segment 0 offset). Private copies are made in the offset from segment 1 (and if full, more segments afterwards), picking file offsets that are simply the next free ones. This is probably good for long-term memory usage: a major collection looks - for pages that are no-longer-used private copies of some shared page, + for pages that are no-longer-used private copies of some shared page(*), and discard them, remapping the address to the shared page. The pages thus freed are recorded into a free list, and can be reused as the private copies of the following (unrelated) pages. + + (*) an additional subtlety here is that the shared page should not + contain uncommitted changes; if 'num_segments_sharing_page' is 1 this + can occur. + + Note that this page manipulation logic is independent from actually + tracking which objects are uncommitted, which occurs at the level of + segment-relative offsets; and propagating changes during commit, + which is done by copying objects (not pages) to the same offset + relative to a different segment. 
*/ static uint8_t num_segments_sharing_page[NB_PAGES]; From noreply at buildbot.pypy.org Sat Mar 15 19:01:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Mar 2014 19:01:25 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Kill again 'num_segments_sharing_page' for now; it's just an additional complication. Message-ID: <20140315180125.BBF5B1C0124@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1025:49e2782f8a0b Date: 2014-03-15 19:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/49e2782f8a0b/ Log: Kill again 'num_segments_sharing_page' for now; it's just an additional complication. diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -1,39 +1,19 @@ -/* For every page, 'num_segments_sharing_page' stores a number that - counts the number of segments that share the page. If 0, the page is - not used so far. - - When the page is first initialized, 'num_segments_sharing_page' is - set to NB_SEGMENTS. When later a segment wants a private copy, it - looks first in its own 'private_page_mapping' tree, which maps shared - pages to private copies. If not found, then it proceeds like this: - - If 'num_segments_sharing_page' is greater than 1, then it is - decremented and a private copy of the page is made. - - If 'num_segments_sharing_page' is equal to 1, then we know we are the - last segment that sees this "shared" copy, and so it is actually not - shared with anybody else --- i.e. it is private already. - - (This means that 'num_segments_sharing_page' is basically just an - optimization. Without it, we might need 'NB_SEGMENTS + 1' copies of - the same data; with it, we can bound the number to 'NB_SEGMENTS'. - This is probably important if NB_SEGMENTS is very small.) +/* This handles pages of objects outside the nursery. Every page + has a "shared copy" and zero or more "private copies". The shared copy of a page is stored in the mmap at the file offset corresponding to the segment 0 offset (with all other segments remapping to the segment 0 offset). Private copies are made in the offset from segment 1 (and if full, more segments afterwards), - picking file offsets that are simply the next free ones. This is - probably good for long-term memory usage: a major collection looks - for pages that are no-longer-used private copies of some shared page(*), - and discard them, remapping the address to the shared page. The - pages thus freed are recorded into a free list, and can be reused as - the private copies of the following (unrelated) pages. + picking file offsets that are simply the next free ones. Each + segment maintains a tree 'private_page_mapping', which maps shared + pages to private copies. - (*) an additional subtlety here is that the shared page should not - contain uncommitted changes; if 'num_segments_sharing_page' is 1 this - can occur. + A major collection looks for pages that are no-longer-used private + copies, and discard them, remapping the address to the shared page. + The pages thus freed are recorded into a free list, and can be reused + as the private copies of the following (unrelated) pages. Note that this page manipulation logic is independent from actually tracking which objects are uncommitted, which occurs at the level of @@ -41,7 +21,6 @@ which is done by copying objects (not pages) to the same offset relative to a different segment. 
*/ -static uint8_t num_segments_sharing_page[NB_PAGES]; static void _pages_privatize(uintptr_t pagenum, uintptr_t count); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); From noreply at buildbot.pypy.org Sun Mar 16 00:03:43 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 16 Mar 2014 00:03:43 +0100 (CET) Subject: [pypy-commit] pypy default: a better fix for ignoring pragma commane(lib...) on MSVC Message-ID: <20140315230343.145051C03A3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69976:b03b791c1e26 Date: 2014-03-16 00:50 +0200 http://bitbucket.org/pypy/pypy/changeset/b03b791c1e26/ Log: a better fix for ignoring pragma commane(lib...) on MSVC diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -65,7 +65,7 @@ # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] # prevent linking with python27.lib - kwds["compile_extra"].append("/DPy_BUILD_CORE") + kwds["link_extra"]=["/NODEFAULTLIB:Python27.lib"] elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: From noreply at buildbot.pypy.org Sun Mar 16 00:03:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 16 Mar 2014 00:03:44 +0100 (CET) Subject: [pypy-commit] pypy default: a more generic extern declaration Message-ID: <20140315230344.6E3041C03A3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69977:0e3e28702d34 Date: 2014-03-16 00:55 +0200 http://bitbucket.org/pypy/pypy/changeset/0e3e28702d34/ Log: a more generic extern declaration diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -20,7 +20,7 @@ #define PyArrayObject PyObject #define PyArray_Descr PyObject -extern PyTypeObject PyArray_Type; +PyAPI_DATA(PyTypeObject) PyArray_Type; typedef unsigned char npy_bool; typedef unsigned char npy_uint8; From noreply at buildbot.pypy.org Sun Mar 16 01:51:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 16 Mar 2014 01:51:56 +0100 (CET) Subject: [pypy-commit] pypy default: don't hardcode the version Message-ID: <20140316005156.DDF821C0124@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69978:ad48ce2a9283 Date: 2014-03-15 17:51 -0700 http://bitbucket.org/pypy/pypy/changeset/ad48ce2a9283/ Log: don't hardcode the version diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,8 +64,10 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] - # prevent linking with python27.lib - kwds["link_extra"]=["/NODEFAULTLIB:Python27.lib"] + # prevent linking with PythonXX.lib + w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] + kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % + (space.int_w(w_maj), space.int_w(w_min))] elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: From noreply at buildbot.pypy.org Sun Mar 16 07:49:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 07:49:14 +0100 (CET) Subject: [pypy-commit] stmgc default: Oups (for tests only) Message-ID: 
<20140316064914.65D0C1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1026:13df2f3e0e20 Date: 2014-03-16 07:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/13df2f3e0e20/ Log: Oups (for tests only) diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -60,7 +60,7 @@ stm_fatalerror("cond destroy: %m\n"); } - memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); + memset(&sync_ctl, 0, sizeof(sync_ctl)); } #ifndef NDEBUG From noreply at buildbot.pypy.org Sun Mar 16 09:29:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 09:29:56 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: in-progress Message-ID: <20140316082956.7B5001C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1027:67d4e6e71904 Date: 2014-03-16 09:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/67d4e6e71904/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -29,6 +29,8 @@ safepoints that may be issued in write_write_contention_management(). */ stm_read(obj); + /* XXX XXX XXX make the logic of write-locking objects optional! */ + /* claim the write-lock for this object. In case we're running the same transaction since a long while, the object can be already in 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, @@ -59,12 +61,12 @@ the common case. Otherwise, we need to compute it based on its location and size. */ if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { - pages_privatize(first_page, 1); + page_privatize(first_page); } else { char *realobj; size_t obj_size; - uintptr_t end_page; + uintptr_t i, end_page; /* get the size of the object */ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); @@ -73,7 +75,9 @@ /* that's the page *following* the last page with the object */ end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - pages_privatize(first_page, end_page - first_page); + for (i = first_page; i < end_page; i++) { + page_privatize(i); + } } } else if (write_locks[lock_idx] == lock_num) { @@ -108,7 +112,7 @@ /* for sanity, check that all other segment copies of this object still have the flag */ long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (i != STM_SEGMENT->segment_num) assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) ->stm_flags & GCFLAG_WRITE_BARRIER); @@ -193,43 +197,51 @@ /************************************************************/ -#if NB_SEGMENTS != 2 -# error "The logic in the functions below only works with two segments" -#endif static bool detect_write_read_conflicts(void) { - long remote_num = 1 - STM_SEGMENT->segment_num; - char *remote_base = get_segment_base(remote_num); - uint8_t remote_version = get_segment(remote_num)->transaction_read_version; + /* Detect conflicts of the form: we want to commit a write to an object, + but the same object was also read in a different thread. + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { - if (get_priv_segment(remote_num)->transaction_state == TS_NONE) - return false; /* no need to check */ + if (i == STM_SEGMENT->segment_num) + continue; - if (is_aborting_now(remote_num)) - return false; /* no need to check: is pending immediate abort */ + if (get_priv_segment(i)->transaction_state == TS_NONE) + continue; /* no need to check */ - LIST_FOREACH_R( - STM_PSEGMENT->modified_old_objects, - object_t * /*item*/, - ({ - if (was_read_remote(remote_base, item, remote_version)) { - /* A write-read conflict! 
*/ - write_read_contention_management(remote_num); + if (is_aborting_now(i)) + continue; /* no need to check: is pending immediate abort */ - /* If we reach this point, we didn't abort, but maybe we - had to wait for the other thread to commit. If we - did, then we have to restart committing from our call - to synchronize_all_threads(). */ - return true; - } - })); + char *remote_base = get_segment_base(i); + uint8_t remote_version = get_segment(i)->transaction_read_version; + + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, + object_t * /*item*/, + ({ + if (was_read_remote(remote_base, item, remote_version)) { + /* A write-read conflict! */ + write_read_contention_management(i); + + /* If we reach this point, we didn't abort, but maybe we + had to wait for the other thread to commit. If we + did, then we have to restart committing from our call + to synchronize_all_threads(). */ + return true; + } + })); + } return false; } static void synchronize_overflow_object_now(object_t *obj) { + abort();//XXX +#if 0 assert(!_is_young(obj)); assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); @@ -264,6 +276,7 @@ long i; char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); for (i = 0; i < NB_SEGMENTS; i++) { + abort();//XXX if (i != STM_SEGMENT->segment_num) { char *dst = REAL_ADDRESS(get_segment_base(i), start); memcpy(dst, src, copy_size); @@ -273,6 +286,68 @@ start = (start + 4096) & ~4095; } while (first_page++ < last_page); +#endif +} + +static void synchronize_object_now(object_t *obj) +{ + /* Assume that the version of 'obj' in the shared pages is up-to-date. + Assume also that the version in our own private page is up-to-date. + This function updates the private page of other threads. + */ + assert(!_is_young(obj)); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + + uintptr_t start = (uintptr_t)obj; + uintptr_t first_page = start / 4096UL; + long i; + + if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { + abort();//XXX WRITE THE FAST CASE + } + else { + char *realobj = REAL_ADDRESS(stm_object_pages, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + + for (; first_page <= last_page; first_page++) { + + for (i = 1; i < NB_SEGMENTS; i++) { + + if (i == STM_SEGMENT->segment_num) + continue; + + if (!is_private_page(i, first_page)) + continue; + + /* The page is a PRIVATE_PAGE. We need to diffuse this + fragment of object from the shared page to this private + page. 
*/ + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the + page's end */ + copy_size = 4096 - (start & 4095); + } + + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + char *src = REAL_ADDRESS(stm_object_pages, start); + char *dst = REAL_ADDRESS(get_segment_base(i), start); + memcpy(dst, src, copy_size); + } + start = (start + 4096) & ~4095; + } + } } static void push_overflow_objects_from_privatized_pages(void) @@ -286,22 +361,12 @@ static void push_modified_to_other_segments(void) { - long remote_num = 1 - STM_SEGMENT->segment_num; char *local_base = STM_SEGMENT->segment_base; - char *remote_base = get_segment_base(remote_num); - bool remote_active = - (get_priv_segment(remote_num)->transaction_state != TS_NONE && - get_segment(remote_num)->nursery_end != NSE_SIGABORT); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ - if (remote_active) { - assert(!was_read_remote(remote_base, item, - get_segment(remote_num)->transaction_read_version)); - } - /* clear the write-lock (note that this runs with all other threads paused, so no need to be careful about ordering) */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; @@ -313,11 +378,14 @@ minor_collection() */ assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); - /* copy the modified object to the other segment */ + /* copy the modified object to the shared copy */ char *src = REAL_ADDRESS(local_base, item); - char *dst = REAL_ADDRESS(remote_base, item); + char *dst = REAL_ADDRESS(stm_object_pages, item); ssize_t size = stmcb_size_rounded_up((struct object_s *)src); memcpy(dst, src, size); + + /* copy the object to the other private pages as needed */ + synchronize_object_now(item); })); list_clear(STM_PSEGMENT->modified_old_objects); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -20,7 +20,7 @@ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define NB_NURSERY_PAGES (STM_GC_NURSERY/4) -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) +#define TOTAL_MEMORY (NB_PAGES * 4096UL * (1 + NB_SEGMENTS)) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE @@ -75,6 +75,13 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; + /* Dict whose keys are shared page numbers, and whose values are + the corresponding private page number. */ + struct tree_s *private_page_mapping; + + /* Head of a free list of private pages. */ + uintptr_t private_free_page_num; + /* List of old objects (older than the current transaction) that the current transaction attempts to modify. 
This is used to track the STM status: they are old objects that where written to and @@ -178,10 +185,6 @@ static char *stm_object_pages; static stm_thread_local_t *stm_all_thread_locals = NULL; -#ifdef STM_TESTS -static char *stm_other_pages; -#endif - static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -43,7 +43,7 @@ #ifdef STM_TESTS uint8_t _stm_get_page_flag(uintptr_t index) { - return flag_page_private[index]; + abort();//XXX } long _stm_count_modified_old_objects(void) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -26,7 +26,7 @@ _stm_nursery_start = NURSERY_START; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { get_segment(i)->nursery_current = (stm_char *)NURSERY_START; get_segment(i)->nursery_end = NURSERY_END; } @@ -378,7 +378,7 @@ _stm_nursery_start = NURSERY_END - free_count; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if ((uintptr_t)get_segment(i)->nursery_current < _stm_nursery_start) get_segment(i)->nursery_current = (stm_char *)_stm_nursery_start; } @@ -411,7 +411,7 @@ int original_num = STM_SEGMENT->segment_num; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); if (MINOR_NOTHING_TO_DO(pseg)) /*TS_NONE segments have NOTHING_TO_DO*/ continue; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -102,85 +102,44 @@ segment 0. */ uintptr_t i; assert(_has_mutex_pages()); - for (i = 1; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, count * 4096UL, pagenum); } - for (i = 0; i < count; i++) - flag_page_private[pagenum + i] = SHARED_PAGE; } -#if 0 -static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count) +static void page_privatize(uintptr_t pagenum) { - /* Same as pages_initialize_shared(), but tries hard to minimize the - total number of pages that remap_file_pages() must handle, by - fragmenting calls as much as possible (the overhead of one system - call appears smaller as the overhead per page). 
*/ - uintptr_t start, i = 0; - while (i < count) { - if (flag_page_private[pagenum + (i++)] == SHARED_PAGE) - continue; - start = i; /* first index of a private page */ - while (1) { - i++; - if (i == count || flag_page_private[pagenum + i] == SHARED_PAGE) - break; - } - pages_initialize_shared(pagenum + start, i - start); + wlog_t *item; + TREE_FIND(*STM_PSEGMENT->private_page_mapping, pagenum, item, + goto not_found); + + /* the page is already privatized */ + return; + + not_found:; + /* look up the next free page */ + uintptr_t free_page_num = STM_PSEGMENT->private_free_page_num; + + /* "mount" it in the segment */ + char *new_page = STM_SEGMENT->segment_base + pagenum * 4096UL; + d_remap_file_pages(new_page, 4096, + NB_PAGES * STM_SEGMENT->segment_num + free_page_num); + increment_total_allocated(4096); + + /* update private_free_page_num */ + uintptr_t future_page = *(uintptr_t *)new_page; + if (future_page == 0) { + future_page = free_page_num + 1; } -} -#endif + STM_PSEGMENT->private_free_page_num = future_page; -static void privatize_range(uintptr_t pagenum, uintptr_t count) -{ - ssize_t pgoff1 = pagenum; - ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * STM_SEGMENT->segment_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - STM_SEGMENT->segment_num); + /* copy the content from the shared (segment 0) source */ + pagecopy(new_page, stm_object_pages + pagenum * 4096UL); - void *localpg = stm_object_pages + localpgoff * 4096UL; - void *otherpg = stm_object_pages + otherpgoff * 4096UL; - - memset(flag_page_private + pagenum, REMAPPING_PAGE, count); - d_remap_file_pages(localpg, count * 4096, pgoff2); - uintptr_t i; - for (i = 0; i < count; i++) { - pagecopy(localpg + 4096 * i, otherpg + 4096 * i); - } - write_fence(); - memset(flag_page_private + pagenum, PRIVATE_PAGE, count); - increment_total_allocated(4096 * count); -} - -static void _pages_privatize(uintptr_t pagenum, uintptr_t count) -{ - mutex_pages_lock(); - - uintptr_t page_start_range = pagenum; - uintptr_t pagestop = pagenum + count; - - for (; pagenum < pagestop; pagenum++) { - uint8_t prev = flag_page_private[pagenum]; - if (prev == PRIVATE_PAGE) { - if (pagenum > page_start_range) { - privatize_range(page_start_range, - pagenum - page_start_range); - } - page_start_range = pagenum + 1; - } - else { - assert(prev == SHARED_PAGE); - } - } - - if (pagenum > page_start_range) { - privatize_range(page_start_range, - pagenum - page_start_range); - } - - mutex_pages_unlock(); + /* update private_page_mapping */ + tree_insert(STM_PSEGMENT->private_page_mapping, pagenum, free_page_num); } #if 0 diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -4,8 +4,8 @@ The shared copy of a page is stored in the mmap at the file offset corresponding to the segment 0 offset (with all other segments - remapping to the segment 0 offset). Private copies are made in the - offset from segment 1 (and if full, more segments afterwards), + remapping to the segment 0 offset). Private copies for segment N are + made in the offset from segment N (for 1 <= N <= NB_SEGMENTS), picking file offsets that are simply the next free ones. Each segment maintains a tree 'private_page_mapping', which maps shared pages to private copies. @@ -15,14 +15,14 @@ The pages thus freed are recorded into a free list, and can be reused as the private copies of the following (unrelated) pages. 
- Note that this page manipulation logic is independent from actually - tracking which objects are uncommitted, which occurs at the level of - segment-relative offsets; and propagating changes during commit, - which is done by copying objects (not pages) to the same offset - relative to a different segment. + Note that this page manipulation logic uses remap_file_pages() to + fully hide its execution cost behind the CPU's memory management unit. + It should not be confused with the logic of tracking which objects + are old-and-committed, old-but-modified, overflow objects, and so on + (which works at the object granularity, not the page granularity). */ -static void _pages_privatize(uintptr_t pagenum, uintptr_t count); +static void page_privatize(uintptr_t pagenum); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void mutex_pages_lock(void); @@ -32,14 +32,8 @@ static void force_major_collection_request(void); static void reset_major_collection_requested(void); -inline static void pages_privatize(uintptr_t pagenum, uintptr_t count) { - /* This is written a bit carefully so that a call with a constant - count == 1 will turn this loop into just one "if". */ - while (flag_page_private[pagenum] == PRIVATE_PAGE) { - if (!--count) { - return; - } - pagenum++; - } - _pages_privatize(pagenum, count); +static inline bool is_private_page(long segnum, uintptr_t pagenum) +{ + return tree_contains(get_priv_segment(segnum)->private_page_mapping, + pagenum); } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -26,12 +26,15 @@ if (stm_object_pages == MAP_FAILED) stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); + /* The segment 0 is not used to run transactions, but to contain the + shared copy of the pages. We mprotect all pages before so that + accesses fail, up to and including the pages corresponding to the + nurseries of the other segments. */ + mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); -#ifdef STM_TESTS - stm_other_pages = segment_base; -#endif /* In each segment, the first page is where TLPREFIX'ed NULL accesses land. We mprotect it so that accesses fail. 
*/ @@ -39,7 +42,7 @@ /* Fill the TLS page (page 1) with 0xDC, for debugging */ memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); - /* Make a "hole" at STM_PSEGMENT */ + /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); @@ -49,11 +52,14 @@ (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); + /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(i + 1 < 255); /* 255 is WL_VISITED in gcpage.c */ - pr->write_lock_num = i + 1; + assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ + pr->write_lock_num = i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; + pr->private_page_mapping = tree_create(); + pr->private_free_page_num = END_NURSERY_PAGE; pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); @@ -62,7 +68,7 @@ pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_abort = tree_create(); - pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); + pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; } @@ -73,10 +79,6 @@ STM_SEGMENT->transaction_read_version never contains zero, so a null read marker means "not read" whatever the current transaction_read_version is. - - The creation markers are initially zero, which is correct: - it means "objects of this line of 256 bytes have not been - allocated by the current transaction." */ setup_sync(); @@ -92,7 +94,7 @@ assert(!_has_mutex()); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); @@ -107,8 +109,6 @@ munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; - memset(flag_page_private, 0, sizeof(flag_page_private)); - teardown_core(); teardown_sync(); teardown_gcpage(); @@ -146,14 +146,14 @@ tl->prev = stm_all_thread_locals->prev; stm_all_thread_locals->prev->next = tl; stm_all_thread_locals->prev = tl; - num = tl->prev->associated_segment_num + 1; + num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. */ - num = num % NB_SEGMENTS; + num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -30,7 +30,7 @@ pthread_mutex_t global_mutex; pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ - uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ + uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread */ uint64_t global_time; }; char reserved[192]; @@ -60,7 +60,7 @@ stm_fatalerror("cond destroy: %m\n"); } - memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); + memset(&sync_ctl, 0, sizeof(sync_ctl)); } #ifndef NDEBUG @@ -124,12 +124,12 @@ { long i; restart: - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { if (can_abort) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), - or for a while. 
If we go past this call, then we + or wait for a while. If we go past this call, then we waited; in this case we have to re-check if no other thread is inevitable. */ inevitable_contention_management(i); @@ -152,7 +152,7 @@ assert(_is_tl_registered(tl)); int num = tl->associated_segment_num; - if (sync_ctl.in_use[num] == 0) { + if (sync_ctl.in_use1[num - 1] == 0) { /* fast-path: we can get the same segment number than the one we had before. The value stored in GS is still valid. */ #ifdef STM_TESTS @@ -165,10 +165,10 @@ } /* Look for the next free segment. If there is none, wait for the condition variable. */ - int i; - for (i = 0; i < NB_SEGMENTS; i++) { - num = (num + 1) % NB_SEGMENTS; - if (sync_ctl.in_use[num] == 0) { + int retries; + for (retries = 0; retries < NB_SEGMENTS; retries++) { + num = (num % NB_SEGMENTS) + 1; + if (sync_ctl.in_use1[num - 1] == 0) { /* we're getting 'num', a different number. */ dprintf(("acquired different segment: %d->%d\n", tl->associated_segment_num, num)); tl->associated_segment_num = num; @@ -184,7 +184,7 @@ return false; got_num: - sync_ctl.in_use[num] = 1; + sync_ctl.in_use1[num - 1] = 1; assert(STM_SEGMENT->segment_num == num); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; @@ -208,8 +208,8 @@ assert(STM_SEGMENT->running_thread == tl); STM_SEGMENT->running_thread = NULL; - assert(sync_ctl.in_use[tl->associated_segment_num] == 1); - sync_ctl.in_use[tl->associated_segment_num] = 0; + assert(sync_ctl.in_use1[tl->associated_segment_num - 1] == 1); + sync_ctl.in_use1[tl->associated_segment_num - 1] = 0; } __attribute__((unused)) @@ -221,7 +221,7 @@ bool _stm_in_transaction(stm_thread_local_t *tl) { int num = tl->associated_segment_num; - assert(num < NB_SEGMENTS); + assert(1 <= num && num <= NB_SEGMENTS); return get_segment(num)->running_thread == tl; } @@ -262,7 +262,7 @@ assert((_safe_points_requested = true, 1)); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_segment(i)->nursery_end == NURSERY_END) get_segment(i)->nursery_end = NSE_SIGPAUSE; } @@ -276,7 +276,7 @@ long result = 0; int my_num = STM_SEGMENT->segment_num; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (i != my_num && get_priv_segment(i)->safe_point == SP_RUNNING) { assert(get_segment(i)->nursery_end <= _STM_NSE_SIGNAL_MAX); result++; @@ -291,7 +291,7 @@ assert((_safe_points_requested = false, 1)); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { assert(get_segment(i)->nursery_end != NURSERY_END); if (get_segment(i)->nursery_end == NSE_SIGPAUSE) get_segment(i)->nursery_end = NURSERY_END; diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -29,6 +29,8 @@ static void _set_weakref_in_all_segments(object_t *weakref, object_t *value) { + abort();//XXX +#if 0 ssize_t size = 16; stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); @@ -44,6 +46,7 @@ else { *WEAKREF_PTR(weakref, size) = value; } +#endif } /***** Minor collection *****/ From noreply at buildbot.pypy.org Sun Mar 16 09:46:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 09:46:32 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: in-progress Message-ID: <20140316084632.903921C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1028:c06abb7b21ca Date: 2014-03-16 09:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/c06abb7b21ca/ Log: in-progress diff --git 
a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -238,26 +238,30 @@ return false; } -static void synchronize_overflow_object_now(object_t *obj) +static void synchronize_object_now(object_t *obj, bool assume_local_private) { - abort();//XXX -#if 0 + /* Copy around the version of 'obj' that lives in our own segment. + It is first copied into the shared pages, and then into other + segments' own private pages. + */ assert(!_is_young(obj)); - assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); uintptr_t start = (uintptr_t)obj; - uintptr_t end = start + obj_size; uintptr_t first_page = start / 4096UL; - uintptr_t last_page = (end - 1) / 4096UL; - do { - if (flag_page_private[first_page] != SHARED_PAGE) { - /* The page is a PRIVATE_PAGE. We need to diffuse this fragment - of our object from our own segment to all other segments. */ + if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { + abort();//XXX WRITE THE FAST CASE + } + else { + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + long i, myself = STM_SEGMENT->segment_num; + + for (; first_page <= last_page; first_page++) { uintptr_t copy_size; if (first_page == last_page) { @@ -265,86 +269,44 @@ copy_size = end - start; } else { - /* this is a non-final fragment, going up to the page's end */ + /* this is a non-final fragment, going up to the + page's end */ copy_size = 4096 - (start & 4095); } - /* double-check that the result fits in one page */ assert(copy_size > 0); assert(copy_size + (start & 4095) <= 4096); - long i; - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - for (i = 0; i < NB_SEGMENTS; i++) { - abort();//XXX - if (i != STM_SEGMENT->segment_num) { - char *dst = REAL_ADDRESS(get_segment_base(i), start); + /* First copy the object into the shared page, if needed */ + assert(IMPLY(assume_local_private, + is_private_page(myself, first_page))); + + if (assume_local_private || is_private_page(myself, first_page)) { + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (copy_size == 4096) + pagecopy(dst, src); + else memcpy(dst, src, copy_size); - } } - } - - start = (start + 4096) & ~4095; - } while (first_page++ < last_page); -#endif -} - -static void synchronize_object_now(object_t *obj) -{ - /* Assume that the version of 'obj' in the shared pages is up-to-date. - Assume also that the version in our own private page is up-to-date. - This function updates the private page of other threads. 
- */ - assert(!_is_young(obj)); - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - - uintptr_t start = (uintptr_t)obj; - uintptr_t first_page = start / 4096UL; - long i; - - if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { - abort();//XXX WRITE THE FAST CASE - } - else { - char *realobj = REAL_ADDRESS(stm_object_pages, obj); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); - uintptr_t end = start + obj_size; - uintptr_t last_page = (end - 1) / 4096UL; - - for (; first_page <= last_page; first_page++) { for (i = 1; i < NB_SEGMENTS; i++) { - - if (i == STM_SEGMENT->segment_num) + if (i == myself) continue; - if (!is_private_page(i, first_page)) continue; - /* The page is a PRIVATE_PAGE. We need to diffuse this + /* The page is a private page. We need to diffuse this fragment of object from the shared page to this private page. */ - - uintptr_t copy_size; - if (first_page == last_page) { - /* this is the final fragment */ - copy_size = end - start; - } - else { - /* this is a non-final fragment, going up to the - page's end */ - copy_size = 4096 - (start & 4095); - } - - /* double-check that the result fits in one page */ - assert(copy_size > 0); - assert(copy_size + (start & 4095) <= 4096); - char *src = REAL_ADDRESS(stm_object_pages, start); char *dst = REAL_ADDRESS(get_segment_base(i), start); - memcpy(dst, src, copy_size); + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); } + start = (start + 4096) & ~4095; } } @@ -356,13 +318,11 @@ return; LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_overflow_object_now(item)); + synchronize_object_now(item, false)); } static void push_modified_to_other_segments(void) { - char *local_base = STM_SEGMENT->segment_base; - LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, @@ -378,14 +338,8 @@ minor_collection() */ assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); - /* copy the modified object to the shared copy */ - char *src = REAL_ADDRESS(local_base, item); - char *dst = REAL_ADDRESS(stm_object_pages, item); - ssize_t size = stmcb_size_rounded_up((struct object_s *)src); - memcpy(dst, src, size); - /* copy the object to the other private pages as needed */ - synchronize_object_now(item); + synchronize_object_now(item, true); })); list_clear(STM_PSEGMENT->modified_old_objects); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -231,4 +231,4 @@ asm("/* workaround for llvm bug */"); } -static void synchronize_overflow_object_now(object_t *obj); +static void synchronize_object_now(object_t *obj, bool assume_local_private); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -199,7 +199,7 @@ content); or add the object to 'large_overflow_objects'. 
*/ if (STM_PSEGMENT->minor_collect_will_commit_now) - synchronize_overflow_object_now(obj); + synchronize_object_now(obj, false); else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } From noreply at buildbot.pypy.org Sun Mar 16 10:00:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 10:00:55 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: fix Message-ID: <20140316090055.7FD431C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1029:8469a9bdaccd Date: 2014-03-16 09:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/8469a9bdaccd/ Log: fix diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -235,7 +235,7 @@ uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { - uint8_t other_segment_num = prev_owner - 1; + uint8_t other_segment_num = prev_owner; assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); From noreply at buildbot.pypy.org Sun Mar 16 10:00:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 10:00:56 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Fix many tests Message-ID: <20140316090056.B1A931C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1030:1d43c3e1a2ff Date: 2014-03-16 10:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/1d43c3e1a2ff/ Log: Fix many tests diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -533,6 +533,8 @@ dprintf((" .----- major collection -----------------------\n")); assert(_has_mutex()); + if (0) { // XXX TEMP + /* first, force a minor collection in each of the other segments */ major_do_minor_collections(); @@ -566,6 +568,7 @@ dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); + } dprintf((" `----------------------------------------------\n")); reset_major_collection_requested(); diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -41,9 +41,11 @@ } #ifdef STM_TESTS -uint8_t _stm_get_page_flag(uintptr_t index) +uintptr_t _stm_get_private_page(uintptr_t pagenum) { - abort();//XXX + wlog_t *item; + TREE_FIND(*STM_PSEGMENT->private_page_mapping, pagenum, item, return 0); + return item->val; } long _stm_count_modified_old_objects(void) diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -118,7 +118,7 @@ static void stm_visit_old_weakrefs(void) { long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct list_s *lst; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -87,7 +87,7 @@ #include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -uint8_t _stm_get_page_flag(uintptr_t index); +uintptr_t _stm_get_private_page(uintptr_t pagenum); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -46,7 +46,7 @@ char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); void _stm_test_switch(stm_thread_local_t *tl); -uint8_t _stm_get_page_flag(uintptr_t index); +uintptr_t _stm_get_private_page(uintptr_t pagenum); int _stm_get_flags(object_t *obj); void 
_stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); @@ -98,8 +98,6 @@ GC_N_SMALL_REQUESTS = 36 # from gcpage.c -SHARED_PAGE = 1 # from pages.h -PRIVATE_PAGE = 3 # from pages.h LARGE_MALLOC_OVERHEAD = 16 # from largemalloc.h lib = ffi.verify(''' @@ -361,8 +359,8 @@ def stm_major_collect(): lib.stm_collect(1) -def stm_get_page_flag(pagenum): - return lib._stm_get_page_flag(pagenum) +def stm_get_private_page(pagenum): + return lib._stm_get_private_page(pagenum) def stm_get_obj_size(o): return lib.stmcb_size_rounded_up(stm_get_real_address(o)) diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -24,8 +24,8 @@ new = self.pop_root() assert len(stm_get_obj_pages(new)) == 2 - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [SHARED_PAGE]*2) + assert ([stm_get_private_page(p) for p in stm_get_obj_pages(new)] + == [0, 0]) assert not is_in_nursery(new) stm_write(new) @@ -33,11 +33,11 @@ # now proceed to write into the object in a new transaction self.start_transaction() - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [SHARED_PAGE]*2) + assert ([stm_get_private_page(p) for p in stm_get_obj_pages(new)] + == [0, 0]) stm_write(new) - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [PRIVATE_PAGE]*2) + assert ([bool(stm_get_private_page(p)) for p in stm_get_obj_pages(new)] + == [True, True]) # write to 2nd page of object!! wnew = stm_get_real_address(new) @@ -52,8 +52,8 @@ self.switch(0) self.abort_transaction() - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [PRIVATE_PAGE]*2) + assert ([bool(stm_get_private_page(p)) for p in stm_get_obj_pages(new)] + == [True, True]) def test_partial_alloced_pages(self): self.start_transaction() @@ -62,14 +62,14 @@ stm_minor_collect() new = self.pop_root() - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER stm_write(new) assert not (stm_get_flags(new) & GCFLAG_WRITE_BARRIER) self.commit_transaction() - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER self.start_transaction() @@ -78,7 +78,7 @@ stm_minor_collect() newer = self.pop_root() # 'new' is still in shared_page and committed - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER # 'newer' is now part of the SHARED page with 'new', but # uncommitted, so no privatization has to take place: @@ -86,10 +86,10 @@ assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER stm_write(newer) # does not privatize assert not (stm_get_flags(newer) & GCFLAG_WRITE_BARRIER) - assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 self.commit_transaction() - assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER def test_major_collection(self): From noreply at buildbot.pypy.org Sun Mar 16 10:38:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 10:38:48 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Don't blink. 
This fixes a bug ('<' instead of '<=') in the middle Message-ID: <20140316093848.AF6F21C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1031:aef2a1cd2a7c Date: 2014-03-16 10:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/aef2a1cd2a7c/ Log: Don't blink. This fixes a bug ('<' instead of '<=') in the middle of a ton of extra asserts needed to find it. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -13,6 +13,7 @@ { assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); /* is this an object from the same transaction, outside the nursery? */ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == @@ -104,15 +105,24 @@ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); } + /* check that we really have a private page */ + assert(_stm_get_private_page(((uintptr_t)obj) / 4096)); + + /* check that so far all copies of the object have the flag */ + long i; + for (i = 0; i <= NB_SEGMENTS; i++) { + assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) + ->stm_flags & GCFLAG_WRITE_BARRIER); + } + /* add the write-barrier-already-called flag ONLY if we succeeded in getting the write-lock */ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; /* for sanity, check that all other segment copies of this object - still have the flag */ - long i; - for (i = 1; i <= NB_SEGMENTS; i++) { + still have the flag (including the shared copy) */ + for (i = 0; i <= NB_SEGMENTS; i++) { if (i != STM_SEGMENT->segment_num) assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) ->stm_flags & GCFLAG_WRITE_BARRIER); @@ -281,30 +291,36 @@ assert(IMPLY(assume_local_private, is_private_page(myself, first_page))); + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); if (assume_local_private || is_private_page(myself, first_page)) { - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); if (copy_size == 4096) pagecopy(dst, src); else memcpy(dst, src, copy_size); } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ + } - for (i = 1; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; - if (!is_private_page(i, first_page)) - continue; - /* The page is a private page. We need to diffuse this - fragment of object from the shared page to this private - page. */ - char *src = REAL_ADDRESS(stm_object_pages, start); - char *dst = REAL_ADDRESS(get_segment_base(i), start); - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); + src = REAL_ADDRESS(stm_object_pages, start); + dst = REAL_ADDRESS(get_segment_base(i), start); + if (is_private_page(i, first_page)) { + /* The page is a private page. We need to diffuse this + fragment of object from the shared page to this private + page. 
*/ + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ + } } start = (start + 4096) & ~4095; @@ -338,7 +354,8 @@ minor_collection() */ assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); - /* copy the object to the other private pages as needed */ + /* copy the object to the shared page, and to the other + private pages as needed */ synchronize_object_now(item, true); })); diff --git a/c7/stm/fprintcolor.h b/c7/stm/fprintcolor.h --- a/c7/stm/fprintcolor.h +++ b/c7/stm/fprintcolor.h @@ -9,7 +9,7 @@ #define dprintf(args) threadcolor_printf args static inline int dprintfcolor(void) { - return 31 + STM_SEGMENT->segment_num % 6; + return 31 + (STM_SEGMENT->segment_num + 5) % 6; } static int threadcolor_printf(const char *format, ...) From noreply at buildbot.pypy.org Sun Mar 16 10:40:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 10:40:27 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Fix for non-STM_TESTS cases Message-ID: <20140316094027.394761C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1032:4dc5acd4c8e0 Date: 2014-03-16 10:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/4dc5acd4c8e0/ Log: Fix for non-STM_TESTS cases diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -106,7 +106,8 @@ } /* check that we really have a private page */ - assert(_stm_get_private_page(((uintptr_t)obj) / 4096)); + assert(tree_contains(STM_PSEGMENT->private_page_mapping, + ((uintptr_t)obj) / 4096)); /* check that so far all copies of the object have the flag */ long i; From noreply at buildbot.pypy.org Sun Mar 16 10:57:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 10:57:06 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Fix major GC. Message-ID: <20140316095706.E11451C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1033:ece9046d8595 Date: 2014-03-16 10:56 +0100 http://bitbucket.org/pypy/stmgc/changeset/ece9046d8595/ Log: Fix major GC. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -325,11 +325,6 @@ /* takes a normal pointer to a thread-local pointer to an object */ object_t *obj = *pobj; - if (obj == NULL || mark_visited_test_and_set(obj)) - return; /* already visited this object */ - - LIST_APPEND(mark_objects_to_trace, obj); - /* Note: this obj might be visited already, but from a different segment. We ignore this case and skip re-visiting the object anyway. The idea is that such an object is old (not from the @@ -340,6 +335,10 @@ segments and only needs visiting once. (It may actually be in a shared page, or maybe not.) 
*/ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; /* already visited this object */ + + LIST_APPEND(mark_objects_to_trace, obj); } static void mark_trace(object_t *obj, char *segment_base) @@ -347,15 +346,6 @@ assert(list_is_empty(mark_objects_to_trace)); while (1) { - -#if 0 - /* first, if we're not seeing segment 0, we must change the - flags in flag_page_private[] from PRIVATE_PAGE to - SEGMENT1_PAGE, which will mean "can't re-share" */ - if (segment_base != stm_object_pages && RESHARE_PAGES) - mark_flag_page_private(obj, segment_base); -#endif - /* trace into the object (the version from 'segment_base') */ struct object_s *realobj = (struct object_s *)REAL_ADDRESS(segment_base, obj); @@ -377,45 +367,33 @@ static void mark_visit_from_roots(void) { - if (testing_prebuilt_objs != NULL) { LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, - mark_visit_object(item, get_segment_base(0))); + mark_visit_object(item, stm_object_pages)); } - /* Do the following twice, so that we trace first the objects from - segment 0, and then all others. XXX This is a hack to make it - more likely that we'll be able to re-share pages. */ + stm_thread_local_t *tl = stm_all_thread_locals; + do { + /* If 'tl' is currently running, its 'associated_segment_num' + field is the segment number that contains the correct + version of its overflowed objects. If not, then the + field is still some correct segment number, and it doesn't + matter which one we pick. */ + char *segment_base = get_segment_base(tl->associated_segment_num); - int must_be_zero; - for (must_be_zero = 1; must_be_zero >= 0; must_be_zero--) { + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; + while (current-- != base) { + assert(current->ss != (object_t *)-1); + mark_visit_object(current->ss, segment_base); + } + mark_visit_object(tl->thread_local_obj, segment_base); - stm_thread_local_t *tl = stm_all_thread_locals; - do { - /* If 'tl' is currently running, its 'associated_segment_num' - field is the segment number that contains the correct - version of its overflowed objects. If not, then the - field is still some correct segment number, and it doesn't - matter which one we pick. */ - char *segment_base = get_segment_base(tl->associated_segment_num); - - if (must_be_zero == (segment_base == get_segment_base(0))) { - - struct stm_shadowentry_s *current = tl->shadowstack; - struct stm_shadowentry_s *base = tl->shadowstack_base; - while (current-- != base) { - assert(current->ss != (object_t *)-1); - mark_visit_object(current->ss, segment_base); - } - mark_visit_object(tl->thread_local_obj, segment_base); - } - - tl = tl->next; - } while (tl != stm_all_thread_locals); - } + tl = tl->next; + } while (tl != stm_all_thread_locals); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state != TS_NONE) mark_visit_object( get_priv_segment(i)->threadlocal_at_start_of_transaction, @@ -426,20 +404,21 @@ static void mark_visit_from_modified_objects(void) { /* The modified objects are the ones that may exist in two different - versions: one in the segment that modified it, and another in - all other segments. */ + versions: one in the segment that modified it, and another in all + other segments. (It can also be more than two if we don't have + eager write locking.) 
+ */ long i; - for (i = 0; i < NB_SEGMENTS; i++) { - char *base1 = get_segment_base(i); /* two different segments */ - char *base2 = get_segment_base(!i); + for (i = 1; i <= NB_SEGMENTS; i++) { + char *base = get_segment_base(i); LIST_FOREACH_R( get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ mark_visited_test_and_set(item); - mark_trace(item, base1); - mark_trace(item, base2); + mark_trace(item, stm_object_pages); /* shared version */ + mark_trace(item, base); /* private version */ })); } } @@ -447,7 +426,7 @@ static void clean_up_segment_lists(void) { long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct list_s *lst; @@ -513,7 +492,7 @@ { /* restore the write locks on the modified objects */ long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); LIST_FOREACH_R( @@ -533,8 +512,6 @@ dprintf((" .----- major collection -----------------------\n")); assert(_has_mutex()); - if (0) { // XXX TEMP - /* first, force a minor collection in each of the other segments */ major_do_minor_collections(); @@ -548,7 +525,8 @@ LIST_FREE(mark_objects_to_trace); /* weakrefs: */ - stm_visit_old_weakrefs(); + if (0)//XXX + stm_visit_old_weakrefs(); /* cleanup */ clean_up_segment_lists(); @@ -568,7 +546,6 @@ dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); - } dprintf((" `----------------------------------------------\n")); reset_major_collection_requested(); From noreply at buildbot.pypy.org Sun Mar 16 11:02:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 11:02:45 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Fix (and simplify the code a bit) Message-ID: <20140316100245.9B0B31C357E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1034:88810db654ce Date: 2014-03-16 11:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/88810db654ce/ Log: Fix (and simplify the code a bit) diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -29,29 +29,21 @@ static void _set_weakref_in_all_segments(object_t *weakref, object_t *value) { - abort();//XXX -#if 0 ssize_t size = 16; stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); - if (flag_page_private[(uintptr_t)point_to_loc / 4096UL] == PRIVATE_PAGE) { - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - char *base = get_segment_base(i); /* two different segments */ - object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); - *ref_loc = value; - } + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *base = get_segment_base(i); + object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); + *ref_loc = value; } - else { - *WEAKREF_PTR(weakref, size) = value; - } -#endif } /***** Minor collection *****/ -static void stm_move_young_weakrefs() +static void stm_move_young_weakrefs(void) { /* The code relies on the fact that no weakref can be an old object weakly pointing to a young object. 
Indeed, weakrefs are immutable From noreply at buildbot.pypy.org Sun Mar 16 11:02:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 11:02:46 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: hg merge default Message-ID: <20140316100246.A5D051C357E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1035:6587c5783a11 Date: 2014-03-16 11:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/6587c5783a11/ Log: hg merge default diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -411,7 +411,10 @@ if lib._stm_in_transaction(tl): if self.current_thread != n: self.switch(n) - self.abort_transaction() + if lib.stm_is_inevitable(): + self.commit_transaction() # must succeed! + else: + self.abort_transaction() for tl in self.tls: lib.stm_unregister_thread_local(tl) lib.stm_teardown() diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -146,6 +146,7 @@ class TestIsolation(BaseTest): def test_not_break(self): + py.test.xfail("known to fail") lpold = stm_allocate_old_refs(1) self.start_transaction() From noreply at buildbot.pypy.org Sun Mar 16 11:02:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 11:02:47 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Fixes Message-ID: <20140316100247.B28531C357E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1036:93b0deba3f1f Date: 2014-03-16 11:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/93b0deba3f1f/ Log: Fixes diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -525,8 +525,7 @@ LIST_FREE(mark_objects_to_trace); /* weakrefs: */ - if (0)//XXX - stm_visit_old_weakrefs(); + stm_visit_old_weakrefs(); /* cleanup */ clean_up_segment_lists(); diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -279,7 +279,7 @@ stm_write(lp0) # privatize page with weakref in it too - assert stm_get_page_flag(stm_get_obj_pages(lp1)[0]) == PRIVATE_PAGE + assert stm_get_private_page(stm_get_obj_pages(lp1)[0]) != 0 assert stm_get_weakref(lp1) == lp0 self.commit_transaction() From noreply at buildbot.pypy.org Sun Mar 16 11:55:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 11:55:42 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Trying to use three threads. Buggy still. Message-ID: <20140316105542.DCE301C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1037:d9d666092944 Date: 2014-03-16 11:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/d9d666092944/ Log: Trying to use three threads. Buggy still. 
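The _set_weakref_in_all_segments() rewrite in the "Fix (and simplify the code a bit)" changeset above reduces to one idiom: write the new referent into every segment's view of the same address, without checking whether the page is shared or private. A minimal stand-alone sketch of that idiom, assuming REAL_ADDRESS(), get_segment_base() and NB_SEGMENTS behave as in the diffs; store_value_in_all_segments is a name made up for this sketch:

/* Sketch only, not the committed code: store 'value' at the same
   segment-relative location in segments 1..NB_SEGMENTS.  A segment
   that still maps the shared page writes into the shared copy, so
   every existing copy of the word ends up updated. */
static void store_value_in_all_segments(stm_char *loc, object_t *value)
{
    long i;
    for (i = 1; i <= NB_SEGMENTS; i++) {
        char *base = get_segment_base(i);
        object_t **real_loc = (object_t **)REAL_ADDRESS(base, loc);
        *real_loc = value;
    }
}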
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -6,6 +6,7 @@ #include "stmgc.h" +#define NTHREADS 3 #define LIST_LENGTH 2000 #define BUNCH 100 @@ -223,7 +224,7 @@ int main(void) { - int status; + int status, i; status = sem_init(&done, 0, 0); assert(status == 0); @@ -233,11 +234,13 @@ setup_list(); - newthread(demo2, (void*)1); - newthread(demo2, (void*)2); + for (i = 1; i <= NTHREADS; i++) { + newthread(demo2, (void*)(uintptr_t)i); + } - status = sem_wait(&done); assert(status == 0); - status = sem_wait(&done); assert(status == 0); + for (i = 1; i <= NTHREADS; i++) { + status = sem_wait(&done); assert(status == 0); + } final_check(); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -529,6 +529,7 @@ static void abort_with_mutex(void) { + assert(_has_mutex()); dprintf(("~~~ ABORT\n")); switch (STM_PSEGMENT->transaction_state) { diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -15,7 +15,7 @@ #define NB_PAGES (1500*256) // 1500MB -#define NB_SEGMENTS 2 +#define NB_SEGMENTS 3 #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define NB_NURSERY_PAGES (STM_GC_NURSERY/4) diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -260,6 +260,7 @@ { assert(_safe_points_requested == false); assert((_safe_points_requested = true, 1)); + assert(_has_mutex()); long i; for (i = 1; i <= NB_SEGMENTS; i++) { From noreply at buildbot.pypy.org Sun Mar 16 11:55:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 11:55:44 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: The error was a too-strict assertion. Message-ID: <20140316105544.123221C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1038:b65a48fadcbd Date: 2014-03-16 11:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/b65a48fadcbd/ Log: The error was a too-strict assertion. diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -293,7 +293,12 @@ long i; for (i = 1; i <= NB_SEGMENTS; i++) { - assert(get_segment(i)->nursery_end != NURSERY_END); + /* note: the only possible way to concurrently change the value + of 'nursery_end' is with an abort done while we wait for + C_AT_SAFE_POINT. It's fine because the next transaction + should not start. */ + assert(get_segment(i)->nursery_end != NURSERY_END || + get_priv_segment(i)->transaction_state == TS_NONE); if (get_segment(i)->nursery_end == NSE_SIGPAUSE) get_segment(i)->nursery_end = NURSERY_END; } From noreply at buildbot.pypy.org Sun Mar 16 12:04:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:04:50 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Test the maximum number of segments, in test_random. Message-ID: <20140316110450.DBC3B1C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1039:0454581ce4a8 Date: 2014-03-16 12:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/0454581ce4a8/ Log: Test the maximum number of segments, in test_random. 
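One convention runs through all of these changesets: segment 0 now holds only the shared copy of the pages, running threads occupy segments 1..NB_SEGMENTS, and fixed-size per-segment arrays (such as in_use1[] in sync.c) are indexed with num - 1. A small illustration of the convention; only NB_SEGMENTS comes from the real headers, the array and function names are hypothetical:

#include <stdint.h>

/* Illustration only: one slot per running segment 1..NB_SEGMENTS.
   Segment 'num' uses slot 'num - 1'; segment 0 (the shared copy)
   has no slot at all. */
static uint8_t example_in_use[NB_SEGMENTS];

static long example_count_busy_segments(void)
{
    long num, total = 0;
    for (num = 1; num <= NB_SEGMENTS; num++) {
        if (example_in_use[num - 1])
            total++;
    }
    return total;
}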
diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -15,7 +15,7 @@ #define NB_PAGES (1500*256) // 1500MB -#define NB_SEGMENTS 3 +#define NB_SEGMENTS STM_NB_SEGMENTS #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define NB_NURSERY_PAGES (STM_GC_NURSERY/4) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -127,6 +127,12 @@ /* ==================== PUBLIC API ==================== */ +/* Number of segments (i.e. how many threads can be executed in + parallel, in maximum). +*/ +#define STM_NB_SEGMENTS 4 + + /* Structure of objects -------------------- diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -9,6 +9,7 @@ typedef ... object_t; typedef ... stm_jmpbuf_t; #define SIZEOF_MYOBJ ... +#define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... @@ -260,6 +261,7 @@ HDR = lib.SIZEOF_MYOBJ assert HDR == 8 GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER +NB_SEGMENTS = lib.STM_NB_SEGMENTS class Conflict(Exception): @@ -400,10 +402,11 @@ class BaseTest(object): + NB_THREADS = 2 def setup_method(self, meth): lib.stm_setup() - self.tls = [_allocate_thread_local(), _allocate_thread_local()] + self.tls = [_allocate_thread_local() for i in range(self.NB_THREADS)] self.current_thread = 0 def teardown_method(self, meth): diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -533,12 +533,13 @@ class TestRandom(BaseTest): + NB_THREADS = NB_SEGMENTS def test_fixed_16_bytes_objects(self, seed=1010): rnd = random.Random(seed) N_OBJECTS = 3 - N_THREADS = 2 + N_THREADS = self.NB_THREADS ex = Exec(self) ex.do("################################################################\n"*10) ex.do('# initialization') From noreply at buildbot.pypy.org Sun Mar 16 12:25:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:25:17 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: More debugging info Message-ID: <20140316112517.3EAB01C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1040:b38942a05903 Date: 2014-03-16 12:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/b38942a05903/ Log: More debugging info diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -54,7 +54,7 @@ e.g. maintains read/write sets. 
The state will be discarded on abort or pushed to other threads""" - def __init__(self, start_time): + def __init__(self, start_time, thread_num=None): self.read_set = set() self.write_set = set() self.values = {} @@ -63,6 +63,7 @@ self.objs_in_conflict = set() self.inevitable = False self.created_in_this_transaction = set() + self.thread_num = thread_num def get_old_modified(self): # returns only the ones that are modified and not from @@ -74,6 +75,8 @@ if objs_in_conflict is not None: self.objs_in_conflict |= objs_in_conflict self._must_abort = True + color = "\033[%dm" % (31 + self.thread_num % 6) + print >> sys.stderr, color + "# must abort: %r\033[0m" % (objs_in_conflict,) def check_must_abort(self): return self._must_abort @@ -180,10 +183,10 @@ r, int(ffi.cast("uintptr_t", ex.content[r])), stm_get_obj_size(ex.content[r]))) - def start_transaction(self): + def start_transaction(self, thread_num): assert self.transaction_state is None start_time = self.global_state.inc_and_get_global_time() - trs = TransactionState(start_time) + trs = TransactionState(start_time, thread_num) trs.update_from_committed( self.global_state.committed_transaction_state) self.transaction_state = trs @@ -305,7 +308,7 @@ def op_start_transaction(ex, global_state, thread_state): - thread_state.start_transaction() + thread_state.start_transaction(ex.thread_num) # ex.do('self.start_transaction()') thread_state.reload_roots(ex) From noreply at buildbot.pypy.org Sun Mar 16 12:25:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:25:18 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Fix Message-ID: <20140316112518.471181C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1041:e37e0bb66492 Date: 2014-03-16 12:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/e37e0bb66492/ Log: Fix diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -294,6 +294,8 @@ if confl_set: contention_management(trs, other_trs, objs_in_conflict=confl_set) + if trs.check_must_abort(): + break if trs.check_must_abort(): self.ex.do('# write-read conflict: %s' % From noreply at buildbot.pypy.org Sun Mar 16 12:32:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:32:41 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix the test from llvm's current point of view Message-ID: <20140316113241.455B41C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1042:d7e384cd1ea9 Date: 2014-03-16 12:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/d7e384cd1ea9/ Log: Fix the test from llvm's current point of view diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -385,7 +385,7 @@ for (i = 0; i < PREBUILT_ROOTS; i++) { void* new_templ = malloc(sizeof(struct node_s)); memcpy(new_templ, &prebuilt_template, sizeof(struct node_s)); - prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)new_templ); + prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)(long)new_templ); if (i % 2 == 0) { int hash = i + 5; From noreply at buildbot.pypy.org Sun Mar 16 12:34:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:34:13 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Use by default the value of STM_NB_SEGMENTS. 
Message-ID: <20140316113413.E84781C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1043:883134c1db24 Date: 2014-03-16 12:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/883134c1db24/ Log: Use by default the value of STM_NB_SEGMENTS. diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -7,7 +7,7 @@ char *filename = NULL; int interactive = 1; int i; - int num_threads = DEFAULT_NUM_THREADS; + int num_threads = STM_NB_SEGMENTS; for (i = 1; i < argc; ++i) { if (strcmp(argv[i], "--help") == 0) { diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -13,8 +13,6 @@ #endif -#define DEFAULT_NUM_THREADS 2 - extern __thread stm_thread_local_t stm_thread_local; struct DuObject_s { From noreply at buildbot.pypy.org Sun Mar 16 12:34:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:34:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: hg merge default Message-ID: <20140316113415.03F5D1C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1044:898bd6b4224f Date: 2014-03-16 12:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/898bd6b4224f/ Log: hg merge default diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -385,7 +385,7 @@ for (i = 0; i < PREBUILT_ROOTS; i++) { void* new_templ = malloc(sizeof(struct node_s)); memcpy(new_templ, &prebuilt_template, sizeof(struct node_s)); - prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)new_templ); + prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)(long)new_templ); if (i % 2 == 0) { int hash = i + 5; From noreply at buildbot.pypy.org Sun Mar 16 12:46:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:46:04 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Comment Message-ID: <20140316114605.012B61C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1045:a04c4c49dedd Date: 2014-03-16 12:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/a04c4c49dedd/ Log: Comment diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -122,7 +122,9 @@ /* look up the next free page */ uintptr_t free_page_num = STM_PSEGMENT->private_free_page_num; - /* "mount" it in the segment */ + /* "mount" it in the segment + (XXX later we should again attempt to group together many calls to + d_remap_file_pages() in succession) */ char *new_page = STM_SEGMENT->segment_base + pagenum * 4096UL; d_remap_file_pages(new_page, 4096, NB_PAGES * STM_SEGMENT->segment_num + free_page_num); From noreply at buildbot.pypy.org Sun Mar 16 12:46:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 12:46:06 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Proper fix. Now demo_random passes at least once :-) Message-ID: <20140316114606.264B71C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1046:d9c43a29891e Date: 2014-03-16 12:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/d9c43a29891e/ Log: Proper fix. 
Now demo_random passes at least once :-) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -555,8 +555,11 @@ /* invoke the callbacks */ invoke_and_clear_callbacks_on_abort(); - if (STM_SEGMENT->nursery_end == NSE_SIGABORT) - STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ + if (STM_SEGMENT->nursery_end == NSE_SIGABORT) { + /* done aborting */ + STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE + : NURSERY_END; + } _finish_transaction(); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -267,6 +267,8 @@ if (get_segment(i)->nursery_end == NURSERY_END) get_segment(i)->nursery_end = NSE_SIGPAUSE; } + assert(!pause_signalled); + pause_signalled = true; } static inline long count_other_threads_sp_running(void) @@ -288,17 +290,14 @@ static void remove_requests_for_safe_point(void) { + assert(pause_signalled); + pause_signalled = false; assert(_safe_points_requested == true); assert((_safe_points_requested = false, 1)); long i; for (i = 1; i <= NB_SEGMENTS; i++) { - /* note: the only possible way to concurrently change the value - of 'nursery_end' is with an abort done while we wait for - C_AT_SAFE_POINT. It's fine because the next transaction - should not start. */ - assert(get_segment(i)->nursery_end != NURSERY_END || - get_priv_segment(i)->transaction_state == TS_NONE); + assert(get_segment(i)->nursery_end != NURSERY_END); if (get_segment(i)->nursery_end == NSE_SIGPAUSE) get_segment(i)->nursery_end = NURSERY_END; } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -30,3 +30,5 @@ static void wait_for_end_of_inevitable_transaction(bool can_abort); static void synchronize_all_threads(void); + +static bool pause_signalled; From noreply at buildbot.pypy.org Sun Mar 16 13:32:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 13:32:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Messy check, needs to be verified... Message-ID: <20140316123215.989CE1D27C0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1047:3716ced45a0b Date: 2014-03-16 13:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/3716ced45a0b/ Log: Messy check, needs to be verified... 
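The pause_signalled fix in the "Proper fix" changeset above leans on a convention that is easy to miss when reading these diffs: 'nursery_end' doubles as a per-segment signal word. NURSERY_END means "run normally", while the small NSE_* values (NSE_SIGPAUSE, NSE_SIGABORT; apparently all <= _STM_NSE_SIGNAL_MAX, judging from the asserts in sync.c) ask the segment to stop at its next safe point. A hedged sketch of how that convention can be queried; the function name is made up:

/* Sketch only: true if a signal value has been stored into the given
   segment's 'nursery_end', i.e. the segment is being asked to pause
   or abort at its next safe point. */
static bool example_safe_point_requested(long segnum)
{
    return get_segment(segnum)->nursery_end <= _STM_NSE_SIGNAL_MAX;
}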
diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -109,14 +109,23 @@ assert(tree_contains(STM_PSEGMENT->private_page_mapping, ((uintptr_t)obj) / 4096)); - /* check that so far all copies of the object have the flag */ + /* check that so far all copies of the object have the flag + (a bit messy because it's possible that we read a page in + the middle of privatization by another thread) */ +#ifndef NDEBUG long i; + long busy_loop = 1000000000; for (i = 0; i <= NB_SEGMENTS; i++) { - assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) - ->stm_flags & GCFLAG_WRITE_BARRIER); + while (!(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) + ->stm_flags & GCFLAG_WRITE_BARRIER)) { + spin_loop(); + if (!--busy_loop) + stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); + } } +#endif - /* add the write-barrier-already-called flag ONLY if we succeeded in + /* remove GCFLAG_WRITE_BARRIER, but only if we succeeded in getting the write-lock */ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; From noreply at buildbot.pypy.org Sun Mar 16 13:32:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 13:32:16 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Tweak: we need after all to have the mutex_pages_lock around Message-ID: <20140316123216.D88701D27C0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1048:1edbabf86da8 Date: 2014-03-16 13:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/1edbabf86da8/ Log: Tweak: we need after all to have the mutex_pages_lock around remappings and around synchronize_object_now(). diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -112,8 +112,8 @@ /* check that so far all copies of the object have the flag (a bit messy because it's possible that we read a page in the middle of privatization by another thread) */ + long i; #ifndef NDEBUG - long i; long busy_loop = 1000000000; for (i = 0; i <= NB_SEGMENTS; i++) { while (!(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) @@ -263,7 +263,10 @@ /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other segments' own private pages. + + This must be called with the mutex_pages_lock! */ + assert(_has_mutex_pages()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); @@ -417,10 +420,12 @@ major_collection_now_at_safe_point(); /* synchronize overflow objects living in privatized pages */ + mutex_pages_lock(); push_overflow_objects_from_privatized_pages(); /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); + mutex_pages_unlock(); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -81,4 +81,14 @@ mutex_pages_unlock(); return result; } + +void _stm_mutex_pages_lock(void) +{ + mutex_pages_lock(); +} + +void _stm_mutex_pages_unlock(void) +{ + mutex_pages_unlock(); +} #endif diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -198,8 +198,11 @@ WRITE_BARRIER flag and traced into it to fix its content); or add the object to 'large_overflow_objects'. 
*/ - if (STM_PSEGMENT->minor_collect_will_commit_now) + if (STM_PSEGMENT->minor_collect_will_commit_now) { + mutex_pages_lock(); synchronize_object_now(obj, false); + mutex_pages_unlock(); + } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -39,7 +39,6 @@ __sync_lock_release(&pages_ctl.mutex_pages); } -__attribute__((unused)) static bool _has_mutex_pages(void) { return pages_ctl.mutex_pages != 0; @@ -47,6 +46,7 @@ static uint64_t increment_total_allocated(ssize_t add_or_remove) { + assert(_has_mutex_pages()); pages_ctl.total_allocated += add_or_remove; if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) @@ -119,6 +119,10 @@ return; not_found:; + /* lock, to prevent concurrent threads from looking up my own + 'private_page_mapping' in parallel */ + mutex_pages_lock(); + /* look up the next free page */ uintptr_t free_page_num = STM_PSEGMENT->private_free_page_num; @@ -142,6 +146,8 @@ /* update private_page_mapping */ tree_insert(STM_PSEGMENT->private_page_mapping, pagenum, free_page_num); + + mutex_pages_unlock(); } #if 0 diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -27,6 +27,7 @@ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); +static bool _has_mutex_pages(void) __attribute__((unused)); static uint64_t increment_total_allocated(ssize_t add_or_remove); static bool is_major_collection_requested(void); static void force_major_collection_request(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -107,6 +107,8 @@ object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); uint64_t _stm_total_allocated(void); +void _stm_mutex_pages_lock(void); +void _stm_mutex_pages_unlock(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -88,6 +88,8 @@ void stm_collect(long level); uint64_t _stm_total_allocated(void); +void _stm_mutex_pages_lock(void); +void _stm_mutex_pages_unlock(void); long stm_identityhash(object_t *obj); long stm_id(object_t *obj); diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -14,6 +14,7 @@ lib.memset(self.rawmem, 0xcd, self.size) lib._stm_largemalloc_init_arena(self.rawmem, self.size) + lib._stm_mutex_pages_lock() # for this file def test_simple(self): d1 = lib._stm_large_malloc(7000) From noreply at buildbot.pypy.org Sun Mar 16 14:34:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 14:34:18 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Refactor after discussion with Remi: don't use a tree to record page Message-ID: <20140316133418.792171C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1049:9eed4e26972b Date: 2014-03-16 14:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/9eed4e26972b/ Log: Refactor after discussion with Remi: don't use a tree to record page remappings, and only do trivial remappings (page N is mapped to either to segment 0's page N, or to itself). Use a simple array of bits. 
diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -8,6 +8,28 @@ memset(write_locks, 0, sizeof(write_locks)); } +static void check_flag_write_barrier(object_t *obj) +{ + /* check that all copies of the object, apart from mine, have the + GCFLAG_WRITE_BARRIER. (a bit messy because it's possible that we + read a page in the middle of privatization by another thread) + */ +#ifndef NDEBUG + long i; + struct object_s *o1; + for (i = 0; i <= NB_SEGMENTS; i++) { + if (i == STM_SEGMENT->segment_num) + continue; + o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); + if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) { + mutex_pages_lock(); /* try again... */ + if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) + stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); + mutex_pages_unlock(); + } + } +#endif +} void _stm_write_slowpath(object_t *obj) { @@ -106,37 +128,20 @@ } /* check that we really have a private page */ - assert(tree_contains(STM_PSEGMENT->private_page_mapping, - ((uintptr_t)obj) / 4096)); + assert(is_private_page(STM_SEGMENT->segment_num, + ((uintptr_t)obj) / 4096)); - /* check that so far all copies of the object have the flag - (a bit messy because it's possible that we read a page in - the middle of privatization by another thread) */ - long i; -#ifndef NDEBUG - long busy_loop = 1000000000; - for (i = 0; i <= NB_SEGMENTS; i++) { - while (!(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) - ->stm_flags & GCFLAG_WRITE_BARRIER)) { - spin_loop(); - if (!--busy_loop) - stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); - } - } -#endif + /* check that so far all copies of the object have the flag */ + check_flag_write_barrier(obj); /* remove GCFLAG_WRITE_BARRIER, but only if we succeeded in getting the write-lock */ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - /* for sanity, check that all other segment copies of this object - still have the flag (including the shared copy) */ - for (i = 0; i <= NB_SEGMENTS; i++) { - if (i != STM_SEGMENT->segment_num) - assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) - ->stm_flags & GCFLAG_WRITE_BARRIER); - } + /* for sanity, check again that all other segment copies of this + object still have the flag (so privatization worked) */ + check_flag_write_barrier(obj); } static void reset_transaction_read_version(void) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -75,13 +75,6 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; - /* Dict whose keys are shared page numbers, and whose values are - the corresponding private page number. */ - struct tree_s *private_page_mapping; - - /* Head of a free list of private pages. */ - uintptr_t private_free_page_num; - /* List of old objects (older than the current transaction) that the current transaction attempts to modify. 
This is used to track the STM status: they are old objects that where written to and diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -43,9 +43,8 @@ #ifdef STM_TESTS uintptr_t _stm_get_private_page(uintptr_t pagenum) { - wlog_t *item; - TREE_FIND(*STM_PSEGMENT->private_page_mapping, pagenum, item, return 0); - return item->val; + /* xxx returns 0 or 1 now */ + return is_private_page(STM_SEGMENT->segment_num, pagenum); } long _stm_count_modified_old_objects(void) diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -25,6 +25,7 @@ static void teardown_pages(void) { memset(&pages_ctl, 0, sizeof(pages_ctl)); + memset(pages_privatized, 0, sizeof(pages_privatized)); } static void mutex_pages_lock(void) @@ -111,41 +112,30 @@ static void page_privatize(uintptr_t pagenum) { - wlog_t *item; - TREE_FIND(*STM_PSEGMENT->private_page_mapping, pagenum, item, - goto not_found); + if (is_private_page(STM_SEGMENT->segment_num, pagenum)) { + /* the page is already privatized */ + return; + } - /* the page is already privatized */ - return; - - not_found:; - /* lock, to prevent concurrent threads from looking up my own - 'private_page_mapping' in parallel */ + /* lock, to prevent concurrent threads from looking up this thread's + 'pages_privatized' bits in parallel */ mutex_pages_lock(); - /* look up the next free page */ - uintptr_t free_page_num = STM_PSEGMENT->private_free_page_num; - - /* "mount" it in the segment - (XXX later we should again attempt to group together many calls to - d_remap_file_pages() in succession) */ - char *new_page = STM_SEGMENT->segment_base + pagenum * 4096UL; - d_remap_file_pages(new_page, 4096, - NB_PAGES * STM_SEGMENT->segment_num + free_page_num); + /* "unmaps" the page to make the address space location correspond + again to its underlying file offset (XXX later we should again + attempt to group together many calls to d_remap_file_pages() in + succession) */ + uintptr_t pagenum_in_file = NB_PAGES * STM_SEGMENT->segment_num + pagenum; + char *new_page = stm_object_pages + pagenum_in_file * 4096UL; + d_remap_file_pages(new_page, 4096, pagenum_in_file); increment_total_allocated(4096); - /* update private_free_page_num */ - uintptr_t future_page = *(uintptr_t *)new_page; - if (future_page == 0) { - future_page = free_page_num + 1; - } - STM_PSEGMENT->private_free_page_num = future_page; - /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); - /* update private_page_mapping */ - tree_insert(STM_PSEGMENT->private_page_mapping, pagenum, free_page_num); + /* add this thread's 'pages_privatized' bit */ + uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); + pages_privatized[pagenum - PAGE_FLAG_START].by_segment |= bitmask; mutex_pages_unlock(); } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -3,17 +3,11 @@ has a "shared copy" and zero or more "private copies". The shared copy of a page is stored in the mmap at the file offset - corresponding to the segment 0 offset (with all other segments - remapping to the segment 0 offset). Private copies for segment N are - made in the offset from segment N (for 1 <= N <= NB_SEGMENTS), - picking file offsets that are simply the next free ones. Each - segment maintains a tree 'private_page_mapping', which maps shared - pages to private copies. 
- - A major collection looks for pages that are no-longer-used private - copies, and discard them, remapping the address to the shared page. - The pages thus freed are recorded into a free list, and can be reused - as the private copies of the following (unrelated) pages. + corresponding to the segment 0 offset. Initially, accessing a page + from segment N remaps to segment 0. If the page is turned private, + then we "un-remap" it to its initial location. The 'pages_privatized' + global array records if a page is currently mapped to segment 0 + (shared page) or to its natural location (private page). Note that this page manipulation logic uses remap_file_pages() to fully hide its execution cost behind the CPU's memory management unit. @@ -22,6 +16,25 @@ (which works at the object granularity, not the page granularity). */ +#define PAGE_FLAG_START END_NURSERY_PAGE +#define PAGE_FLAG_END NB_PAGES + +struct page_shared_s { +#if NB_SEGMENTS <= 8 + uint8_t by_segment; +#elif NB_SEGMENTS <= 16 + uint16_t by_segment; +#elif NB_SEGMENTS <= 32 + uint32_t by_segment; +#elif NB_SEGMENTS <= 64 + uint64_t by_segment; +#else +# error "NB_SEGMENTS > 64 not supported right now" +#endif +}; + +static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; + static void page_privatize(uintptr_t pagenum); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); @@ -35,6 +48,7 @@ static inline bool is_private_page(long segnum, uintptr_t pagenum) { - return tree_contains(get_priv_segment(segnum)->private_page_mapping, - pagenum); + assert(pagenum >= PAGE_FLAG_START); + uint64_t bitmask = 1UL << (segnum - 1); + return (pages_privatized[pagenum - PAGE_FLAG_START].by_segment & bitmask); } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -58,8 +58,6 @@ pr->write_lock_num = i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; - pr->private_page_mapping = tree_create(); - pr->private_free_page_num = END_NURSERY_PAGE; pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); From noreply at buildbot.pypy.org Sun Mar 16 14:38:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 14:38:21 +0100 (CET) Subject: [pypy-commit] stmgc default: Base the diffs on llvm r201645 rather than the slightly more recent revision Message-ID: <20140316133821.90D1C1C0670@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1050:23c28d61b36e Date: 2014-03-16 14:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/23c28d61b36e/ Log: Base the diffs on llvm r201645 rather than the slightly more recent revision which seems to have the bug that it doesn't produce gdb information. 
diff --git a/c7/llvmfix/addrspacecast-in-constant.diff b/c7/llvmfix/addrspacecast-in-constant.diff --- a/c7/llvmfix/addrspacecast-in-constant.diff +++ b/c7/llvmfix/addrspacecast-in-constant.diff @@ -1,8 +1,8 @@ Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp =================================================================== ---- lib/CodeGen/AsmPrinter/AsmPrinter.cpp (revision 203791) -+++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp (working copy) -@@ -1529,6 +1529,8 @@ +--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp (revision 201645) ++++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp (working copy) +@@ -1617,6 +1617,8 @@ Ctx); } diff --git a/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff --- a/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff +++ b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff @@ -1,6 +1,6 @@ Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp =================================================================== ---- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (revision 203791) +--- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (revision 201645) +++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (working copy) @@ -299,6 +299,17 @@ From noreply at buildbot.pypy.org Sun Mar 16 17:22:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 17:22:49 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Resharing pages, still buggy, in-progress Message-ID: <20140316162249.0516C1D287B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1051:234b6a0ba9a5 Date: 2014-03-16 17:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/234b6a0ba9a5/ Log: Resharing pages, still buggy, in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -263,7 +263,62 @@ return false; } -static void synchronize_object_now(object_t *obj, bool assume_local_private) +static void copy_object_to_shared(object_t *obj, int source_segment_num) +{ + /* Only used by major GC. XXX There is a lot of code duplication + with synchronize_object_now() but I don't completely see how to + improve... 
+ */ + assert(_has_mutex_pages()); + assert(!_is_young(obj)); + + uintptr_t start = (uintptr_t)obj; + uintptr_t first_page = start / 4096UL; + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(get_segment(source_segment_num)->segment_base, obj); + + if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + abort();//XXX WRITE THE FAST CASE + } + else { + ssize_t obj_size = stmcb_size_rounded_up(realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + + for (; first_page <= last_page; first_page++) { + + /* Copy the object into the shared page, if needed */ + if (is_private_page(source_segment_num, first_page)) { + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the + page's end */ + copy_size = 4096 - (start & 4095); + } + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + + start = (start + 4096) & ~4095; + } + } +} + +static void synchronize_object_now(object_t *obj) { /* Copy around the version of 'obj' that lives in our own segment. It is first copied into the shared pages, and then into other @@ -306,12 +361,9 @@ assert(copy_size + (start & 4095) <= 4096); /* First copy the object into the shared page, if needed */ - assert(IMPLY(assume_local_private, - is_private_page(myself, first_page))); - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); char *dst = REAL_ADDRESS(stm_object_pages, start); - if (assume_local_private || is_private_page(myself, first_page)) { + if (is_private_page(myself, first_page)) { if (copy_size == 4096) pagecopy(dst, src); else @@ -352,7 +404,7 @@ return; LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_object_now(item, false)); + synchronize_object_now(item)); } static void push_modified_to_other_segments(void) @@ -374,7 +426,7 @@ /* copy the object to the shared page, and to the other private pages as needed */ - synchronize_object_now(item, true); + synchronize_object_now(item); })); list_clear(STM_PSEGMENT->modified_old_objects); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -224,4 +224,5 @@ asm("/* workaround for llvm bug */"); } -static void synchronize_object_now(object_t *obj, bool assume_local_private); +static void copy_object_to_shared(object_t *obj, int source_segment_num); +static void synchronize_object_now(object_t *obj); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -192,130 +192,129 @@ /************************************************************/ +static uintptr_t object_last_page(object_t *obj) +{ + uintptr_t lastbyte; + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); -#if 0 -static inline void mark_single_flag_private(uintptr_t pagenum) -{ - if (flag_page_private[pagenum] == PRIVATE_PAGE) { - assert(pagenum >= END_NURSERY_PAGE); - assert(pagenum < NB_PAGES); - flag_page_private[pagenum] = SEGMENT1_PAGE; + if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + lastbyte = (uintptr_t)obj; } else { - assert(flag_page_private[pagenum] == SHARED_PAGE || - flag_page_private[pagenum] == SEGMENT1_PAGE); + /* get the size of 
the object */ + size_t obj_size = stmcb_size_rounded_up(realobj); + + /* that's the last byte within the object */ + lastbyte = ((uintptr_t)obj) + obj_size - 1; } + return lastbyte / 4096UL; } -static inline void mark_flag_page_private(object_t *obj, char *segment_base) +/* A macro that expands to: run the 'expression' for every page that + touches objects in the 'modified_old_objects' list. +*/ +#define BITOP(expression) \ + LIST_FOREACH_R( \ + get_priv_segment(segment_num)->modified_old_objects, \ + object_t * /* item */, \ + ({ \ + struct page_shared_s *ps; \ + uintptr_t pagenum = ((uintptr_t)item) / 4096UL; \ + uintptr_t count = object_last_page(item) - pagenum; \ + ps = &pages_privatized[pagenum - PAGE_FLAG_START]; \ + do { \ + expression; \ + ps++; \ + } while (count--); \ + })); + +static void major_hide_private_bits_for_modified_objects(long segment_num) { - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; - - if (LIKELY((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0)) { - mark_single_flag_private(first_page); - } - else { - char *realobj; - size_t obj_size; - uintptr_t end_page; - - /* get the size of the object */ - realobj = REAL_ADDRESS(segment_base, obj); - obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - - /* that's the page *following* the last page with the object */ - end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - - while (first_page < end_page) - mark_single_flag_private(first_page++); - } + uint64_t negativebitmask = ~(1 << (segment_num - 1)); +#ifndef NDEBUG + BITOP(assert((ps->by_segment & negativebitmask) != ps->by_segment)); +#endif + BITOP(ps->by_segment &= negativebitmask); } -static void major_reshare_pages_range(uintptr_t first_page, uintptr_t end_page) +static void major_restore_private_bits_for_modified_objects(long segment_num) { - uintptr_t i; - for (i = first_page; i < end_page; i++) { + uint64_t positivebitmask = 1 << (segment_num - 1); + BITOP(ps->by_segment |= positivebitmask); +} - switch (flag_page_private[i]) { - - case SEGMENT1_PAGE: - /* this page stays private after major collection */ - flag_page_private[i] = PRIVATE_PAGE; - break; - - case PRIVATE_PAGE:; - /* this page becomes shared again. No object in it was - traced belonging to a segment other than 0. - - XXX This is maybe a too-strict condition, but the more - general condition "all traced objects belong to the same - segment" has problems with large objects in segments > 0. - More precisely: we'd need to keep in the shared page the - content of the objects (from segment > 0), but also the - largemalloc's chunk data (stored in segment 0). - */ -#if NB_SEGMENTS != 2 -# error "limited to NB_SEGMENTS == 2" -#endif - char *ppage0 = get_segment_base(0) + i * 4096; - char *ppage1 = get_segment_base(1) + i * 4096; - - /* two cases for mapping pages to file-pages (fpages): - - (0->0, 1->1) - - (0->1, 1->0) - Distinguish which case it is by hacking a lot */ - - // 0->0,1->1 or 0->1,1->0 - /* map page 1 to fpage 0: */ - d_remap_file_pages(ppage1, 4096, i); - // 0->0,1->0 or 0->1,1->0 - - char oldvalue0 = *ppage0; - char oldvalue1 = *ppage1; - asm("":::"memory"); - *ppage0 = 1 + oldvalue1; - asm("":::"memory"); - char newvalue1 = *ppage1; - asm("":::"memory"); - *ppage0 = oldvalue0; - /* if we are in 0->0,1->0, old and new are different: - In this case we are done. 
We keep the largemalloc - data structure and objects of ppage0/fpage0 */ - if (oldvalue1 == newvalue1) { - // 0->1,1->0 - /* ppage0/fpage1 has the data structure that we want - in ppage1/fpage0, so we copy it */ - pagecopy(ppage1, ppage0); // copy from page0 to page1, - // i.e. from the underlying memory seg1 to seg0 - d_remap_file_pages(ppage0, 4096, i); - // 0->0,1->0 - } - flag_page_private[i] = SHARED_PAGE; - - increment_total_allocated(-4096 * (NB_SEGMENTS-1)); - break; - - case SHARED_PAGE: - break; /* stay shared */ - - default: - assert(!"unexpected flag_page_private"); - } - } -} +#undef BITOP static void major_reshare_pages(void) { /* re-share pages if possible. Each re-sharing decreases total_allocated by 4096. */ - major_reshare_pages_range( - END_NURSERY_PAGE, /* not the nursery! */ - (uninitialized_page_start - stm_object_pages) / 4096UL); - major_reshare_pages_range( - (uninitialized_page_stop - stm_object_pages) / 4096UL, - NB_PAGES); + + long i; + mutex_pages_lock(); + for (i = 1; i <= NB_SEGMENTS; i++) { + + /* For each segment, push the current overflow objects from + private pages to the corresponding shared pages, if necessary. + */ + struct list_s *lst = get_priv_segment(i)->large_overflow_objects; + if (lst != NULL) { + LIST_FOREACH_R(lst, object_t *, copy_object_to_shared(item, i)); + } + + /* The 'modified_old_objects' list gives the list of objects + whose pages need to remain private. We temporarily remove + these bits from 'pages_privatized', so that these pages will + be skipped by the loop below. + */ + major_hide_private_bits_for_modified_objects(i); + } + + /* Now loop over all pages that are still in 'pages_privatized', + and re-share them. + */ + uintptr_t pagenum, endpagenum; + struct page_shared_s ps; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (pagenum == endpagenum) + break; /* no pages in the 2nd section, so done too */ + } + + ps = pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps.by_segment != 0) { + pages_privatized[pagenum - PAGE_FLAG_START].by_segment = 0; + + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + if (ps.by_segment & (1 << j)) { + /* Page 'pagenum' is private in segment 'j + 1'. Reshare */ + page_reshare(j + 1, pagenum); + } + } + } + pagenum++; + } + + /* Done. Now 'pages_privatized' should be entirely zeroes. 
Restore + the previously-hidden bits + */ + for (i = 1; i <= NB_SEGMENTS; i++) { + major_restore_private_bits_for_modified_objects(i); + } + mutex_pages_unlock(); } -#endif + /************************************************************/ @@ -472,7 +471,9 @@ static void sweep_large_objects(void) { + mutex_pages_lock(); _stm_largemalloc_sweep(); + mutex_pages_unlock(); } static void clean_write_locks(void) @@ -488,7 +489,7 @@ memset(write_locks + lock2_idx, 0, sizeof(write_locks) - lock2_idx); } -static void major_set_write_locks(void) +static void major_restore_write_locks(void) { /* restore the write locks on the modified objects */ long i; @@ -518,6 +519,10 @@ dprintf((" | used before collection: %ld\n", (long)pages_ctl.total_allocated)); + /* reshare pages */ + if (RESHARE_PAGES) + major_reshare_pages(); + /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); @@ -531,17 +536,11 @@ clean_up_segment_lists(); /* sweeping */ - mutex_pages_lock(); -#if 0 - if (RESHARE_PAGES) - major_reshare_pages(); -#endif sweep_large_objects(); //sweep_uniform_pages(); - mutex_pages_unlock(); clean_write_locks(); - major_set_write_locks(); + major_restore_write_locks(); dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -15,7 +15,7 @@ #define GC_MAJOR_COLLECT 1.82 /* re-share pages after major collections (1 or 0) */ -#define RESHARE_PAGES 0 +#define RESHARE_PAGES 1 diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -200,7 +200,7 @@ */ if (STM_PSEGMENT->minor_collect_will_commit_now) { mutex_pages_lock(); - synchronize_object_now(obj, false); + synchronize_object_now(obj); mutex_pages_unlock(); } else diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -140,6 +140,33 @@ mutex_pages_unlock(); } +static void page_reshare(long segment_num, uintptr_t pagenum) +{ + char *segment_base = get_segment(segment_num)->segment_base; + +#if 0 /* disabled: the private page that we are removing is + typically missing the inter-object information from + largemalloc.c */ + long i, errors=0; + uint64_t *p = (uint64_t *)(stm_object_pages + pagenum * 4096UL); + uint64_t *q = (uint64_t *)(segment_base + pagenum * 4096UL); + for (i = 0; i < 4096 / 8; i++) { + if (p[i] != q[i]) { + fprintf(stderr, "%p: 0x%lx\t\t%p: 0x%lx\n", + &p[i], p[i], &q[i], q[i]); + errors++; + } + } + assert(!errors); +#endif + + madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); + increment_total_allocated(-4096); +} + + #if 0 static bool is_fully_in_shared_pages(object_t *obj) { diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -35,8 +35,9 @@ static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; +static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); -static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); +static void page_reshare(long segment_num, uintptr_t pagenum); static void mutex_pages_lock(void); static void mutex_pages_unlock(void); diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -202,7 +202,6 @@ # self.start_transaction() stm_major_collect() - py.test.skip("XXX implement me") assert lib._stm_total_allocated() == 
5000 + LMO # shared again def test_reshare_if_no_longer_modified_1(self): From noreply at buildbot.pypy.org Sun Mar 16 17:55:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 17:55:49 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Bug fixes Message-ID: <20140316165549.7686F1D28FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1052:885ed3b0f6ee Date: 2014-03-16 17:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/885ed3b0f6ee/ Log: Bug fixes diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -272,10 +272,11 @@ assert(_has_mutex_pages()); assert(!_is_young(obj)); + char *segment_base = get_segment(source_segment_num)->segment_base; uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(get_segment(source_segment_num)->segment_base, obj); + REAL_ADDRESS(segment_base, obj); if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { abort();//XXX WRITE THE FAST CASE @@ -305,7 +306,7 @@ assert(copy_size > 0); assert(copy_size + (start & 4095) <= 4096); - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *src = REAL_ADDRESS(segment_base, start); char *dst = REAL_ADDRESS(stm_object_pages, start); if (copy_size == 4096) pagecopy(dst, src); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -253,29 +253,31 @@ long i; mutex_pages_lock(); + for (i = 1; i <= NB_SEGMENTS; i++) { + /* The 'modified_old_objects' list gives the list of objects + whose pages need to remain private. We temporarily remove + these bits from 'pages_privatized', so that these pages will + be skipped by the loop below (and by copy_object_to_shared()). + */ + major_hide_private_bits_for_modified_objects(i); /* For each segment, push the current overflow objects from - private pages to the corresponding shared pages, if necessary. + private pages to the corresponding shared pages, if + necessary. The pages that we will re-share must contain this + data; otherwise, it would exist only in the private pages, + and get lost in the loop below. */ struct list_s *lst = get_priv_segment(i)->large_overflow_objects; if (lst != NULL) { LIST_FOREACH_R(lst, object_t *, copy_object_to_shared(item, i)); } - - /* The 'modified_old_objects' list gives the list of objects - whose pages need to remain private. We temporarily remove - these bits from 'pages_privatized', so that these pages will - be skipped by the loop below. - */ - major_hide_private_bits_for_modified_objects(i); } /* Now loop over all pages that are still in 'pages_privatized', and re-share them. */ uintptr_t pagenum, endpagenum; - struct page_shared_s ps; pagenum = END_NURSERY_PAGE; /* starts after the nursery */ endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; @@ -291,18 +293,7 @@ break; /* no pages in the 2nd section, so done too */ } - ps = pages_privatized[pagenum - PAGE_FLAG_START]; - if (ps.by_segment != 0) { - pages_privatized[pagenum - PAGE_FLAG_START].by_segment = 0; - - long j; - for (j = 0; j < NB_SEGMENTS; j++) { - if (ps.by_segment & (1 << j)) { - /* Page 'pagenum' is private in segment 'j + 1'. 
Reshare */ - page_reshare(j + 1, pagenum); - } - } - } + page_check_and_reshare(pagenum); pagenum++; } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -140,30 +140,24 @@ mutex_pages_unlock(); } -static void page_reshare(long segment_num, uintptr_t pagenum) +static void page_reshare(uintptr_t pagenum) { - char *segment_base = get_segment(segment_num)->segment_base; + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + pages_privatized[pagenum - PAGE_FLAG_START].by_segment = 0; -#if 0 /* disabled: the private page that we are removing is - typically missing the inter-object information from - largemalloc.c */ - long i, errors=0; - uint64_t *p = (uint64_t *)(stm_object_pages + pagenum * 4096UL); - uint64_t *q = (uint64_t *)(segment_base + pagenum * 4096UL); - for (i = 0; i < 4096 / 8; i++) { - if (p[i] != q[i]) { - fprintf(stderr, "%p: 0x%lx\t\t%p: 0x%lx\n", - &p[i], p[i], &q[i], q[i]); - errors++; + long j, total = 0; + for (j = 0; j < NB_SEGMENTS; j++) { + if (ps.by_segment & (1 << j)) { + /* Page 'pagenum' is private in segment 'j + 1'. Reshare */ + char *segment_base = stm_object_pages + NB_PAGES * 4096UL * (j+1); + + madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); + total -= 4096; } } - assert(!errors); -#endif - - madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED); - d_remap_file_pages(segment_base + pagenum * 4096UL, - 4096, pagenum); - increment_total_allocated(-4096); + increment_total_allocated(total); } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -37,7 +37,7 @@ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); -static void page_reshare(long segment_num, uintptr_t pagenum); +static void page_reshare(uintptr_t pagenum); static void mutex_pages_lock(void); static void mutex_pages_unlock(void); @@ -53,3 +53,9 @@ uint64_t bitmask = 1UL << (segnum - 1); return (pages_privatized[pagenum - PAGE_FLAG_START].by_segment & bitmask); } + +static inline void page_check_and_reshare(uintptr_t pagenum) +{ + if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) + page_reshare(pagenum); +} From noreply at buildbot.pypy.org Sun Mar 16 18:11:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 18:11:13 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Comment Message-ID: <20140316171113.7D8961C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1053:4885b404222a Date: 2014-03-16 18:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/4885b404222a/ Log: Comment diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -129,8 +129,10 @@ /* ==================== PUBLIC API ==================== */ -/* Number of segments (i.e. how many threads can be executed in - parallel, in maximum). +/* Number of segments (i.e. how many transactions can be executed in + parallel, in maximum). If you try to start transactions in more + threads than the number of segments, it will block, waiting for the + next segment to become free. 
*/ #define STM_NB_SEGMENTS 4 From noreply at buildbot.pypy.org Sun Mar 16 18:29:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 18:29:49 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: Forgot about get_segment_base() Message-ID: <20140316172949.712BB1C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1054:c7ed97e2f504 Date: 2014-03-16 18:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/c7ed97e2f504/ Log: Forgot about get_segment_base() diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -272,7 +272,7 @@ assert(_has_mutex_pages()); assert(!_is_young(obj)); - char *segment_base = get_segment(source_segment_num)->segment_base; + char *segment_base = get_segment_base(source_segment_num); uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; struct object_s *realobj = (struct object_s *) diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -149,7 +149,7 @@ for (j = 0; j < NB_SEGMENTS; j++) { if (ps.by_segment & (1 << j)) { /* Page 'pagenum' is private in segment 'j + 1'. Reshare */ - char *segment_base = stm_object_pages + NB_PAGES * 4096UL * (j+1); + char *segment_base = get_segment_base(j + 1); madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED); d_remap_file_pages(segment_base + pagenum * 4096UL, From noreply at buildbot.pypy.org Sun Mar 16 18:33:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 18:33:38 +0100 (CET) Subject: [pypy-commit] stmgc c7-more-segments: It worked fine by chance: "!segment_num" was always 0, which was the correct value. Message-ID: <20140316173338.A03311C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-more-segments Changeset: r1055:b23d72e6aa6b Date: 2014-03-16 18:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/b23d72e6aa6b/ Log: It worked fine by chance: "!segment_num" was always 0, which was the correct value. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -518,19 +518,17 @@ static void reset_modified_from_other_segments(int segment_num) { - /* pull the right versions from other threads in order + /* pull the right versions from segment 0 in order to reset our pages as part of an abort. Note that this function is also sometimes called from contention.c to clean up the state of a different thread, when we would really like it to be aborted now and it is suspended at a safe-point. 
- */ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); - long remote_num = !segment_num; char *local_base = get_segment_base(segment_num); - char *remote_base = get_segment_base(remote_num); + char *remote_base = get_segment_base(0); LIST_FOREACH_R( pseg->modified_old_objects, From noreply at buildbot.pypy.org Sun Mar 16 19:46:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 19:46:28 +0100 (CET) Subject: [pypy-commit] stmgc default: merge heads Message-ID: <20140316184628.C57E61C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1057:526f7325d475 Date: 2014-03-16 19:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/526f7325d475/ Log: merge heads diff --git a/c7/llvmfix/addrspacecast-in-constant.diff b/c7/llvmfix/addrspacecast-in-constant.diff --- a/c7/llvmfix/addrspacecast-in-constant.diff +++ b/c7/llvmfix/addrspacecast-in-constant.diff @@ -1,8 +1,8 @@ Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp =================================================================== ---- lib/CodeGen/AsmPrinter/AsmPrinter.cpp (revision 203791) -+++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp (working copy) -@@ -1529,6 +1529,8 @@ +--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp (revision 201645) ++++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp (working copy) +@@ -1617,6 +1617,8 @@ Ctx); } diff --git a/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff --- a/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff +++ b/c7/llvmfix/no-introduce-bogus-cast-in-combine.diff @@ -1,6 +1,6 @@ Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp =================================================================== ---- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (revision 203791) +--- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (revision 201645) +++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (working copy) @@ -299,6 +299,17 @@ From noreply at buildbot.pypy.org Sun Mar 16 19:46:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 19:46:27 +0100 (CET) Subject: [pypy-commit] stmgc default: Merge c7-more-segments. It seems to work, including in a pypy. There Message-ID: <20140316184627.B53771C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1056:1e46b1ad2e26 Date: 2014-03-16 19:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/1e46b1ad2e26/ Log: Merge c7-more-segments. It seems to work, including in a pypy. There is a very small but measurable overhead when compared to the previous version, probably because it makes a bit more copies for now, but I think it's ok. 
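The "bit more copies" the log mentions come from the commit path in the core.c hunk further down: a modified object is pushed fragment by fragment from the writer's private pages into the shared (segment 0) pages, and from there into every other segment that has privatized one of those pages. The following is only an illustrative sketch of that double copy, with tiny made-up sizes, plain arrays standing in for the mmap'ed segments, and all locking, remapping and GC flag handling left out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NB_SEGMENTS 3                 /* assumption: demo value */
    #define PAGE_SIZE   4096
    #define NB_PAGES    4                 /* assumption: demo value */

    /* copies[0] models the shared pages, copies[1..NB_SEGMENTS] the
       per-segment private views of the same address range */
    static char copies[NB_SEGMENTS + 1][NB_PAGES * PAGE_SIZE];
    static bool page_is_private[NB_SEGMENTS + 1][NB_PAGES];

    /* push one object's bytes: writer's private copy -> shared copy ->
       every other segment's private copy, one page fragment at a time */
    static void push_committed(int writer, uintptr_t start, size_t size)
    {
        uintptr_t end = start + size;
        uintptr_t page = start / PAGE_SIZE, last = (end - 1) / PAGE_SIZE;

        for (; page <= last; page++) {
            size_t frag = (page == last) ? end - start
                                         : PAGE_SIZE - (start % PAGE_SIZE);
            if (page_is_private[writer][page])            /* step 1 */
                memcpy(&copies[0][start], &copies[writer][start], frag);

            for (int i = 1; i <= NB_SEGMENTS; i++)        /* step 2 */
                if (i != writer && page_is_private[i][page])
                    memcpy(&copies[i][start], &copies[0][start], frag);

            start = (page + 1) * PAGE_SIZE;  /* next fragment is page-aligned */
        }
    }

    int main(void)
    {
        page_is_private[1][0] = page_is_private[2][0] = true;
        strcpy(&copies[1][100], "hello");
        push_committed(1, 100, 6);
        printf("%s %s\n", &copies[0][100], &copies[2][100]);  /* hello hello */
        return 0;
    }

In the real synchronize_object_now() the copies go through REAL_ADDRESS() and pagecopy()/memcpy() and must be done with mutex_pages_lock() held, as the diff below shows.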
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -6,6 +6,7 @@ #include "stmgc.h" +#define NTHREADS 3 #define LIST_LENGTH 2000 #define BUNCH 100 @@ -223,7 +224,7 @@ int main(void) { - int status; + int status, i; status = sem_init(&done, 0, 0); assert(status == 0); @@ -233,11 +234,13 @@ setup_list(); - newthread(demo2, (void*)1); - newthread(demo2, (void*)2); + for (i = 1; i <= NTHREADS; i++) { + newthread(demo2, (void*)(uintptr_t)i); + } - status = sem_wait(&done); assert(status == 0); - status = sem_wait(&done); assert(status == 0); + for (i = 1; i <= NTHREADS; i++) { + status = sem_wait(&done); assert(status == 0); + } final_check(); diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -385,7 +385,7 @@ for (i = 0; i < PREBUILT_ROOTS; i++) { void* new_templ = malloc(sizeof(struct node_s)); memcpy(new_templ, &prebuilt_template, sizeof(struct node_s)); - prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)new_templ); + prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)(long)new_templ); if (i % 2 == 0) { int hash = i + 5; diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -235,7 +235,7 @@ uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { - uint8_t other_segment_num = prev_owner - 1; + uint8_t other_segment_num = prev_owner; assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -8,11 +8,34 @@ memset(write_locks, 0, sizeof(write_locks)); } +static void check_flag_write_barrier(object_t *obj) +{ + /* check that all copies of the object, apart from mine, have the + GCFLAG_WRITE_BARRIER. (a bit messy because it's possible that we + read a page in the middle of privatization by another thread) + */ +#ifndef NDEBUG + long i; + struct object_s *o1; + for (i = 0; i <= NB_SEGMENTS; i++) { + if (i == STM_SEGMENT->segment_num) + continue; + o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); + if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) { + mutex_pages_lock(); /* try again... */ + if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) + stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); + mutex_pages_unlock(); + } + } +#endif +} void _stm_write_slowpath(object_t *obj) { assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); /* is this an object from the same transaction, outside the nursery? */ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == @@ -29,6 +52,8 @@ safepoints that may be issued in write_write_contention_management(). */ stm_read(obj); + /* XXX XXX XXX make the logic of write-locking objects optional! */ + /* claim the write-lock for this object. In case we're running the same transaction since a long while, the object can be already in 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, @@ -59,12 +84,12 @@ the common case. Otherwise, we need to compute it based on its location and size. 
*/ if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { - pages_privatize(first_page, 1, true); + page_privatize(first_page); } else { char *realobj; size_t obj_size; - uintptr_t end_page; + uintptr_t i, end_page; /* get the size of the object */ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); @@ -73,7 +98,9 @@ /* that's the page *following* the last page with the object */ end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - pages_privatize(first_page, end_page - first_page, true); + for (i = first_page; i < end_page; i++) { + page_privatize(i); + } } } else if (write_locks[lock_idx] == lock_num) { @@ -100,19 +127,21 @@ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); } - /* add the write-barrier-already-called flag ONLY if we succeeded in + /* check that we really have a private page */ + assert(is_private_page(STM_SEGMENT->segment_num, + ((uintptr_t)obj) / 4096)); + + /* check that so far all copies of the object have the flag */ + check_flag_write_barrier(obj); + + /* remove GCFLAG_WRITE_BARRIER, but only if we succeeded in getting the write-lock */ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - /* for sanity, check that all other segment copies of this object - still have the flag */ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - if (i != STM_SEGMENT->segment_num) - assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) - ->stm_flags & GCFLAG_WRITE_BARRIER); - } + /* for sanity, check again that all other segment copies of this + object still have the flag (so privatization worked) */ + check_flag_write_barrier(obj); } static void reset_transaction_read_version(void) @@ -193,59 +222,130 @@ /************************************************************/ -#if NB_SEGMENTS != 2 -# error "The logic in the functions below only works with two segments" -#endif static bool detect_write_read_conflicts(void) { - long remote_num = 1 - STM_SEGMENT->segment_num; - char *remote_base = get_segment_base(remote_num); - uint8_t remote_version = get_segment(remote_num)->transaction_read_version; + /* Detect conflicts of the form: we want to commit a write to an object, + but the same object was also read in a different thread. + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { - if (get_priv_segment(remote_num)->transaction_state == TS_NONE) - return false; /* no need to check */ + if (i == STM_SEGMENT->segment_num) + continue; - if (is_aborting_now(remote_num)) - return false; /* no need to check: is pending immediate abort */ + if (get_priv_segment(i)->transaction_state == TS_NONE) + continue; /* no need to check */ - LIST_FOREACH_R( - STM_PSEGMENT->modified_old_objects, - object_t * /*item*/, - ({ - if (was_read_remote(remote_base, item, remote_version)) { - /* A write-read conflict! */ - write_read_contention_management(remote_num); + if (is_aborting_now(i)) + continue; /* no need to check: is pending immediate abort */ - /* If we reach this point, we didn't abort, but maybe we - had to wait for the other thread to commit. If we - did, then we have to restart committing from our call - to synchronize_all_threads(). */ - return true; - } - })); + char *remote_base = get_segment_base(i); + uint8_t remote_version = get_segment(i)->transaction_read_version; + + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, + object_t * /*item*/, + ({ + if (was_read_remote(remote_base, item, remote_version)) { + /* A write-read conflict! 
*/ + write_read_contention_management(i); + + /* If we reach this point, we didn't abort, but maybe we + had to wait for the other thread to commit. If we + did, then we have to restart committing from our call + to synchronize_all_threads(). */ + return true; + } + })); + } return false; } -static void synchronize_overflow_object_now(object_t *obj) +static void copy_object_to_shared(object_t *obj, int source_segment_num) { + /* Only used by major GC. XXX There is a lot of code duplication + with synchronize_object_now() but I don't completely see how to + improve... + */ + assert(_has_mutex_pages()); assert(!_is_young(obj)); - assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); + + char *segment_base = get_segment_base(source_segment_num); + uintptr_t start = (uintptr_t)obj; + uintptr_t first_page = start / 4096UL; + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(segment_base, obj); + + if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + abort();//XXX WRITE THE FAST CASE + } + else { + ssize_t obj_size = stmcb_size_rounded_up(realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + + for (; first_page <= last_page; first_page++) { + + /* Copy the object into the shared page, if needed */ + if (is_private_page(source_segment_num, first_page)) { + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the + page's end */ + copy_size = 4096 - (start & 4095); + } + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + char *src = REAL_ADDRESS(segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + + start = (start + 4096) & ~4095; + } + } +} + +static void synchronize_object_now(object_t *obj) +{ + /* Copy around the version of 'obj' that lives in our own segment. + It is first copied into the shared pages, and then into other + segments' own private pages. + + This must be called with the mutex_pages_lock! + */ + assert(_has_mutex_pages()); + assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); uintptr_t start = (uintptr_t)obj; - uintptr_t end = start + obj_size; uintptr_t first_page = start / 4096UL; - uintptr_t last_page = (end - 1) / 4096UL; - do { - if (flag_page_private[first_page] != SHARED_PAGE) { - /* The page is a PRIVATE_PAGE. We need to diffuse this fragment - of our object from our own segment to all other segments. 
*/ + if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { + abort();//XXX WRITE THE FAST CASE + } + else { + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + long i, myself = STM_SEGMENT->segment_num; + + for (; first_page <= last_page; first_page++) { uintptr_t copy_size; if (first_page == last_page) { @@ -253,26 +353,50 @@ copy_size = end - start; } else { - /* this is a non-final fragment, going up to the page's end */ + /* this is a non-final fragment, going up to the + page's end */ copy_size = 4096 - (start & 4095); } - /* double-check that the result fits in one page */ assert(copy_size > 0); assert(copy_size + (start & 4095) <= 4096); - long i; + /* First copy the object into the shared page, if needed */ char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - for (i = 0; i < NB_SEGMENTS; i++) { - if (i != STM_SEGMENT->segment_num) { - char *dst = REAL_ADDRESS(get_segment_base(i), start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (is_private_page(myself, first_page)) { + if (copy_size == 4096) + pagecopy(dst, src); + else memcpy(dst, src, copy_size); + } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ + } + + for (i = 1; i <= NB_SEGMENTS; i++) { + if (i == myself) + continue; + + src = REAL_ADDRESS(stm_object_pages, start); + dst = REAL_ADDRESS(get_segment_base(i), start); + if (is_private_page(i, first_page)) { + /* The page is a private page. We need to diffuse this + fragment of object from the shared page to this private + page. */ + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ } } + + start = (start + 4096) & ~4095; } - - start = (start + 4096) & ~4095; - } while (first_page++ < last_page); + } } static void push_overflow_objects_from_privatized_pages(void) @@ -281,27 +405,15 @@ return; LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_overflow_object_now(item)); + synchronize_object_now(item)); } static void push_modified_to_other_segments(void) { - long remote_num = 1 - STM_SEGMENT->segment_num; - char *local_base = STM_SEGMENT->segment_base; - char *remote_base = get_segment_base(remote_num); - bool remote_active = - (get_priv_segment(remote_num)->transaction_state != TS_NONE && - get_segment(remote_num)->nursery_end != NSE_SIGABORT); - LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ - if (remote_active) { - assert(!was_read_remote(remote_base, item, - get_segment(remote_num)->transaction_read_version)); - } - /* clear the write-lock (note that this runs with all other threads paused, so no need to be careful about ordering) */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; @@ -313,11 +425,9 @@ minor_collection() */ assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); - /* copy the modified object to the other segment */ - char *src = REAL_ADDRESS(local_base, item); - char *dst = REAL_ADDRESS(remote_base, item); - ssize_t size = stmcb_size_rounded_up((struct object_s *)src); - memcpy(dst, src, size); + /* copy the object to the shared page, and to the other + private pages as needed */ + synchronize_object_now(item); })); list_clear(STM_PSEGMENT->modified_old_objects); @@ -368,10 +478,12 @@ major_collection_now_at_safe_point(); /* synchronize overflow objects living 
in privatized pages */ + mutex_pages_lock(); push_overflow_objects_from_privatized_pages(); /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); + mutex_pages_unlock(); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { @@ -406,19 +518,17 @@ static void reset_modified_from_other_segments(int segment_num) { - /* pull the right versions from other threads in order + /* pull the right versions from segment 0 in order to reset our pages as part of an abort. Note that this function is also sometimes called from contention.c to clean up the state of a different thread, when we would really like it to be aborted now and it is suspended at a safe-point. - */ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); - long remote_num = !segment_num; char *local_base = get_segment_base(segment_num); - char *remote_base = get_segment_base(remote_num); + char *remote_base = get_segment_base(0); LIST_FOREACH_R( pseg->modified_old_objects, @@ -489,6 +599,7 @@ static void abort_with_mutex(void) { + assert(_has_mutex()); dprintf(("~~~ ABORT\n")); switch (STM_PSEGMENT->transaction_state) { @@ -514,8 +625,11 @@ /* invoke the callbacks */ invoke_and_clear_callbacks_on_abort(); - if (STM_SEGMENT->nursery_end == NSE_SIGABORT) - STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ + if (STM_SEGMENT->nursery_end == NSE_SIGABORT) { + /* done aborting */ + STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE + : NURSERY_END; + } _finish_transaction(); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -15,12 +15,12 @@ #define NB_PAGES (1500*256) // 1500MB -#define NB_SEGMENTS 2 +#define NB_SEGMENTS STM_NB_SEGMENTS #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define NB_NURSERY_PAGES (STM_GC_NURSERY/4) -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) +#define TOTAL_MEMORY (NB_PAGES * 4096UL * (1 + NB_SEGMENTS)) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE @@ -178,10 +178,6 @@ static char *stm_object_pages; static stm_thread_local_t *stm_all_thread_locals = NULL; -#ifdef STM_TESTS -static char *stm_other_pages; -#endif - static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; @@ -228,4 +224,5 @@ asm("/* workaround for llvm bug */"); } -static void synchronize_overflow_object_now(object_t *obj); +static void copy_object_to_shared(object_t *obj, int source_segment_num); +static void synchronize_object_now(object_t *obj); diff --git a/c7/stm/fprintcolor.h b/c7/stm/fprintcolor.h --- a/c7/stm/fprintcolor.h +++ b/c7/stm/fprintcolor.h @@ -9,7 +9,7 @@ #define dprintf(args) threadcolor_printf args static inline int dprintfcolor(void) { - return 31 + STM_SEGMENT->segment_num % 6; + return 31 + (STM_SEGMENT->segment_num + 5) % 6; } static int threadcolor_printf(const char *format, ...) 
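The core.h hunk above is what actually makes the segment count configurable: NB_SEGMENTS now comes from STM_NB_SEGMENTS, and TOTAL_MEMORY grows to NB_PAGES * 4096UL * (1 + NB_SEGMENTS), one extra copy of the whole page range for the shared view at slot 0 plus one view per segment. Below is a hedged sketch of the resulting base-address arithmetic; the fake stm_object_pages value stands in for the real mmap() result, everything else mirrors the constants shown in the diffs:

    #include <stdint.h>
    #include <stdio.h>

    #define NB_PAGES     (1500 * 256)     /* core.h: ~1500MB of 4K pages */
    #define NB_SEGMENTS  4                /* stmgc.h: STM_NB_SEGMENTS */
    #define TOTAL_MEMORY (NB_PAGES * 4096UL * (1 + NB_SEGMENTS))

    /* assumption: in the real code this is the single big mmap()ed area */
    static char *stm_object_pages = (char *)0x10000000000UL;

    static char *get_segment_base(long segment_num)   /* 0 == shared copy */
    {
        return stm_object_pages + segment_num * (NB_PAGES * 4096UL);
    }

    int main(void)
    {
        printf("total mapping: %lu MB\n", TOTAL_MEMORY / (1024 * 1024));
        printf("shared base  : %p\n", (void *)get_segment_base(0));
        printf("segment 3    : %p\n", (void *)get_segment_base(3));
        return 0;
    }

With these values the library reserves about 7.5GB of address space up front, but since segments start out remapped onto the shared pages, extra memory is only really consumed when a page gets privatized.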
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -192,129 +192,121 @@ /************************************************************/ +static uintptr_t object_last_page(object_t *obj) +{ + uintptr_t lastbyte; + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); -static inline void mark_single_flag_private(uintptr_t pagenum) -{ - if (flag_page_private[pagenum] == PRIVATE_PAGE) { - assert(pagenum >= END_NURSERY_PAGE); - assert(pagenum < NB_PAGES); - flag_page_private[pagenum] = SEGMENT1_PAGE; + if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + lastbyte = (uintptr_t)obj; } else { - assert(flag_page_private[pagenum] == SHARED_PAGE || - flag_page_private[pagenum] == SEGMENT1_PAGE); + /* get the size of the object */ + size_t obj_size = stmcb_size_rounded_up(realobj); + + /* that's the last byte within the object */ + lastbyte = ((uintptr_t)obj) + obj_size - 1; } + return lastbyte / 4096UL; } -static inline void mark_flag_page_private(object_t *obj, char *segment_base) +/* A macro that expands to: run the 'expression' for every page that + touches objects in the 'modified_old_objects' list. +*/ +#define BITOP(expression) \ + LIST_FOREACH_R( \ + get_priv_segment(segment_num)->modified_old_objects, \ + object_t * /* item */, \ + ({ \ + struct page_shared_s *ps; \ + uintptr_t pagenum = ((uintptr_t)item) / 4096UL; \ + uintptr_t count = object_last_page(item) - pagenum; \ + ps = &pages_privatized[pagenum - PAGE_FLAG_START]; \ + do { \ + expression; \ + ps++; \ + } while (count--); \ + })); + +static void major_hide_private_bits_for_modified_objects(long segment_num) { - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; - - if (LIKELY((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0)) { - mark_single_flag_private(first_page); - } - else { - char *realobj; - size_t obj_size; - uintptr_t end_page; - - /* get the size of the object */ - realobj = REAL_ADDRESS(segment_base, obj); - obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - - /* that's the page *following* the last page with the object */ - end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - - while (first_page < end_page) - mark_single_flag_private(first_page++); - } + uint64_t negativebitmask = ~(1 << (segment_num - 1)); +#ifndef NDEBUG + BITOP(assert((ps->by_segment & negativebitmask) != ps->by_segment)); +#endif + BITOP(ps->by_segment &= negativebitmask); } -static void major_reshare_pages_range(uintptr_t first_page, uintptr_t end_page) +static void major_restore_private_bits_for_modified_objects(long segment_num) { - uintptr_t i; - for (i = first_page; i < end_page; i++) { + uint64_t positivebitmask = 1 << (segment_num - 1); + BITOP(ps->by_segment |= positivebitmask); +} - switch (flag_page_private[i]) { - - case SEGMENT1_PAGE: - /* this page stays private after major collection */ - flag_page_private[i] = PRIVATE_PAGE; - break; - - case PRIVATE_PAGE:; - /* this page becomes shared again. No object in it was - traced belonging to a segment other than 0. - - XXX This is maybe a too-strict condition, but the more - general condition "all traced objects belong to the same - segment" has problems with large objects in segments > 0. - More precisely: we'd need to keep in the shared page the - content of the objects (from segment > 0), but also the - largemalloc's chunk data (stored in segment 0). 
- */ -#if NB_SEGMENTS != 2 -# error "limited to NB_SEGMENTS == 2" -#endif - char *ppage0 = get_segment_base(0) + i * 4096; - char *ppage1 = get_segment_base(1) + i * 4096; - - /* two cases for mapping pages to file-pages (fpages): - - (0->0, 1->1) - - (0->1, 1->0) - Distinguish which case it is by hacking a lot */ - - // 0->0,1->1 or 0->1,1->0 - /* map page 1 to fpage 0: */ - d_remap_file_pages(ppage1, 4096, i); - // 0->0,1->0 or 0->1,1->0 - - char oldvalue0 = *ppage0; - char oldvalue1 = *ppage1; - asm("":::"memory"); - *ppage0 = 1 + oldvalue1; - asm("":::"memory"); - char newvalue1 = *ppage1; - asm("":::"memory"); - *ppage0 = oldvalue0; - /* if we are in 0->0,1->0, old and new are different: - In this case we are done. We keep the largemalloc - data structure and objects of ppage0/fpage0 */ - if (oldvalue1 == newvalue1) { - // 0->1,1->0 - /* ppage0/fpage1 has the data structure that we want - in ppage1/fpage0, so we copy it */ - pagecopy(ppage1, ppage0); // copy from page0 to page1, - // i.e. from the underlying memory seg1 to seg0 - d_remap_file_pages(ppage0, 4096, i); - // 0->0,1->0 - } - flag_page_private[i] = SHARED_PAGE; - - increment_total_allocated(-4096 * (NB_SEGMENTS-1)); - break; - - case SHARED_PAGE: - break; /* stay shared */ - - default: - assert(!"unexpected flag_page_private"); - } - } -} +#undef BITOP static void major_reshare_pages(void) { /* re-share pages if possible. Each re-sharing decreases total_allocated by 4096. */ - major_reshare_pages_range( - END_NURSERY_PAGE, /* not the nursery! */ - (uninitialized_page_start - stm_object_pages) / 4096UL); - major_reshare_pages_range( - (uninitialized_page_stop - stm_object_pages) / 4096UL, - NB_PAGES); + + long i; + mutex_pages_lock(); + + for (i = 1; i <= NB_SEGMENTS; i++) { + /* The 'modified_old_objects' list gives the list of objects + whose pages need to remain private. We temporarily remove + these bits from 'pages_privatized', so that these pages will + be skipped by the loop below (and by copy_object_to_shared()). + */ + major_hide_private_bits_for_modified_objects(i); + + /* For each segment, push the current overflow objects from + private pages to the corresponding shared pages, if + necessary. The pages that we will re-share must contain this + data; otherwise, it would exist only in the private pages, + and get lost in the loop below. + */ + struct list_s *lst = get_priv_segment(i)->large_overflow_objects; + if (lst != NULL) { + LIST_FOREACH_R(lst, object_t *, copy_object_to_shared(item, i)); + } + } + + /* Now loop over all pages that are still in 'pages_privatized', + and re-share them. + */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (pagenum == endpagenum) + break; /* no pages in the 2nd section, so done too */ + } + + page_check_and_reshare(pagenum); + pagenum++; + } + + /* Done. Now 'pages_privatized' should be entirely zeroes. 
Restore + the previously-hidden bits + */ + for (i = 1; i <= NB_SEGMENTS; i++) { + major_restore_private_bits_for_modified_objects(i); + } + mutex_pages_unlock(); } + /************************************************************/ @@ -323,11 +315,6 @@ /* takes a normal pointer to a thread-local pointer to an object */ object_t *obj = *pobj; - if (obj == NULL || mark_visited_test_and_set(obj)) - return; /* already visited this object */ - - LIST_APPEND(mark_objects_to_trace, obj); - /* Note: this obj might be visited already, but from a different segment. We ignore this case and skip re-visiting the object anyway. The idea is that such an object is old (not from the @@ -338,6 +325,10 @@ segments and only needs visiting once. (It may actually be in a shared page, or maybe not.) */ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; /* already visited this object */ + + LIST_APPEND(mark_objects_to_trace, obj); } static void mark_trace(object_t *obj, char *segment_base) @@ -345,13 +336,6 @@ assert(list_is_empty(mark_objects_to_trace)); while (1) { - - /* first, if we're not seeing segment 0, we must change the - flags in flag_page_private[] from PRIVATE_PAGE to - SEGMENT1_PAGE, which will mean "can't re-share" */ - if (segment_base != stm_object_pages && RESHARE_PAGES) - mark_flag_page_private(obj, segment_base); - /* trace into the object (the version from 'segment_base') */ struct object_s *realobj = (struct object_s *)REAL_ADDRESS(segment_base, obj); @@ -373,45 +357,33 @@ static void mark_visit_from_roots(void) { - if (testing_prebuilt_objs != NULL) { LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, - mark_visit_object(item, get_segment_base(0))); + mark_visit_object(item, stm_object_pages)); } - /* Do the following twice, so that we trace first the objects from - segment 0, and then all others. XXX This is a hack to make it - more likely that we'll be able to re-share pages. */ + stm_thread_local_t *tl = stm_all_thread_locals; + do { + /* If 'tl' is currently running, its 'associated_segment_num' + field is the segment number that contains the correct + version of its overflowed objects. If not, then the + field is still some correct segment number, and it doesn't + matter which one we pick. */ + char *segment_base = get_segment_base(tl->associated_segment_num); - int must_be_zero; - for (must_be_zero = 1; must_be_zero >= 0; must_be_zero--) { + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; + while (current-- != base) { + assert(current->ss != (object_t *)-1); + mark_visit_object(current->ss, segment_base); + } + mark_visit_object(tl->thread_local_obj, segment_base); - stm_thread_local_t *tl = stm_all_thread_locals; - do { - /* If 'tl' is currently running, its 'associated_segment_num' - field is the segment number that contains the correct - version of its overflowed objects. If not, then the - field is still some correct segment number, and it doesn't - matter which one we pick. 
*/ - char *segment_base = get_segment_base(tl->associated_segment_num); - - if (must_be_zero == (segment_base == get_segment_base(0))) { - - struct stm_shadowentry_s *current = tl->shadowstack; - struct stm_shadowentry_s *base = tl->shadowstack_base; - while (current-- != base) { - assert(current->ss != (object_t *)-1); - mark_visit_object(current->ss, segment_base); - } - mark_visit_object(tl->thread_local_obj, segment_base); - } - - tl = tl->next; - } while (tl != stm_all_thread_locals); - } + tl = tl->next; + } while (tl != stm_all_thread_locals); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state != TS_NONE) mark_visit_object( get_priv_segment(i)->threadlocal_at_start_of_transaction, @@ -422,20 +394,21 @@ static void mark_visit_from_modified_objects(void) { /* The modified objects are the ones that may exist in two different - versions: one in the segment that modified it, and another in - all other segments. */ + versions: one in the segment that modified it, and another in all + other segments. (It can also be more than two if we don't have + eager write locking.) + */ long i; - for (i = 0; i < NB_SEGMENTS; i++) { - char *base1 = get_segment_base(i); /* two different segments */ - char *base2 = get_segment_base(!i); + for (i = 1; i <= NB_SEGMENTS; i++) { + char *base = get_segment_base(i); LIST_FOREACH_R( get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ mark_visited_test_and_set(item); - mark_trace(item, base1); - mark_trace(item, base2); + mark_trace(item, stm_object_pages); /* shared version */ + mark_trace(item, base); /* private version */ })); } } @@ -443,7 +416,7 @@ static void clean_up_segment_lists(void) { long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct list_s *lst; @@ -489,7 +462,9 @@ static void sweep_large_objects(void) { + mutex_pages_lock(); _stm_largemalloc_sweep(); + mutex_pages_unlock(); } static void clean_write_locks(void) @@ -505,11 +480,11 @@ memset(write_locks + lock2_idx, 0, sizeof(write_locks) - lock2_idx); } -static void major_set_write_locks(void) +static void major_restore_write_locks(void) { /* restore the write locks on the modified objects */ long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); LIST_FOREACH_R( @@ -535,6 +510,10 @@ dprintf((" | used before collection: %ld\n", (long)pages_ctl.total_allocated)); + /* reshare pages */ + if (RESHARE_PAGES) + major_reshare_pages(); + /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); @@ -548,15 +527,11 @@ clean_up_segment_lists(); /* sweeping */ - mutex_pages_lock(); - if (RESHARE_PAGES) - major_reshare_pages(); sweep_large_objects(); //sweep_uniform_pages(); - mutex_pages_unlock(); clean_write_locks(); - major_set_write_locks(); + major_restore_write_locks(); dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -15,7 +15,7 @@ #define GC_MAJOR_COLLECT 1.82 /* re-share pages after major collections (1 or 0) */ -#define RESHARE_PAGES 0 +#define RESHARE_PAGES 1 diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -41,9 +41,10 @@ } #ifdef STM_TESTS -uint8_t _stm_get_page_flag(uintptr_t index) +uintptr_t _stm_get_private_page(uintptr_t pagenum) { 
- return flag_page_private[index]; + /* xxx returns 0 or 1 now */ + return is_private_page(STM_SEGMENT->segment_num, pagenum); } long _stm_count_modified_old_objects(void) @@ -79,4 +80,14 @@ mutex_pages_unlock(); return result; } + +void _stm_mutex_pages_lock(void) +{ + mutex_pages_lock(); +} + +void _stm_mutex_pages_unlock(void) +{ + mutex_pages_unlock(); +} #endif diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -26,7 +26,7 @@ _stm_nursery_start = NURSERY_START; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { get_segment(i)->nursery_current = (stm_char *)NURSERY_START; get_segment(i)->nursery_end = NURSERY_END; } @@ -198,8 +198,11 @@ WRITE_BARRIER flag and traced into it to fix its content); or add the object to 'large_overflow_objects'. */ - if (STM_PSEGMENT->minor_collect_will_commit_now) - synchronize_overflow_object_now(obj); + if (STM_PSEGMENT->minor_collect_will_commit_now) { + mutex_pages_lock(); + synchronize_object_now(obj); + mutex_pages_unlock(); + } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } @@ -378,7 +381,7 @@ _stm_nursery_start = NURSERY_END - free_count; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if ((uintptr_t)get_segment(i)->nursery_current < _stm_nursery_start) get_segment(i)->nursery_current = (stm_char *)_stm_nursery_start; } @@ -411,7 +414,7 @@ int original_num = STM_SEGMENT->segment_num; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); if (MINOR_NOTHING_TO_DO(pseg)) /*TS_NONE segments have NOTHING_TO_DO*/ continue; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -25,6 +25,7 @@ static void teardown_pages(void) { memset(&pages_ctl, 0, sizeof(pages_ctl)); + memset(pages_privatized, 0, sizeof(pages_privatized)); } static void mutex_pages_lock(void) @@ -39,7 +40,6 @@ __sync_lock_release(&pages_ctl.mutex_pages); } -__attribute__((unused)) static bool _has_mutex_pages(void) { return pages_ctl.mutex_pages != 0; @@ -47,6 +47,7 @@ static uint64_t increment_total_allocated(ssize_t add_or_remove) { + assert(_has_mutex_pages()); pages_ctl.total_allocated += add_or_remove; if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) @@ -102,100 +103,64 @@ segment 0. */ uintptr_t i; assert(_has_mutex_pages()); - for (i = 1; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, count * 4096UL, pagenum); } - for (i = 0; i < count; i++) - flag_page_private[pagenum + i] = SHARED_PAGE; } -#if 0 -static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count) +static void page_privatize(uintptr_t pagenum) { - /* Same as pages_initialize_shared(), but tries hard to minimize the - total number of pages that remap_file_pages() must handle, by - fragmenting calls as much as possible (the overhead of one system - call appears smaller as the overhead per page). 
*/ - uintptr_t start, i = 0; - while (i < count) { - if (flag_page_private[pagenum + (i++)] == SHARED_PAGE) - continue; - start = i; /* first index of a private page */ - while (1) { - i++; - if (i == count || flag_page_private[pagenum + i] == SHARED_PAGE) - break; - } - pages_initialize_shared(pagenum + start, i - start); - } -} -#endif - -static void privatize_range(uintptr_t pagenum, uintptr_t count, bool full) -{ - ssize_t pgoff1 = pagenum; - ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * STM_SEGMENT->segment_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - STM_SEGMENT->segment_num); - - void *localpg = stm_object_pages + localpgoff * 4096UL; - void *otherpg = stm_object_pages + otherpgoff * 4096UL; - - memset(flag_page_private + pagenum, REMAPPING_PAGE, count); - d_remap_file_pages(localpg, count * 4096, pgoff2); - uintptr_t i; - if (full) { - for (i = 0; i < count; i++) { - pagecopy(localpg + 4096 * i, otherpg + 4096 * i); - } - } - else { - pagecopy(localpg, otherpg); - if (count > 1) - pagecopy(localpg + 4096 * (count-1), otherpg + 4096 * (count-1)); - } - write_fence(); - memset(flag_page_private + pagenum, PRIVATE_PAGE, count); - increment_total_allocated(4096 * count); -} - -static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) -{ - /* narrow the range of pages to privatize from the end: */ - while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { - if (!--count) - return; + if (is_private_page(STM_SEGMENT->segment_num, pagenum)) { + /* the page is already privatized */ + return; } + /* lock, to prevent concurrent threads from looking up this thread's + 'pages_privatized' bits in parallel */ mutex_pages_lock(); - uintptr_t page_start_range = pagenum; - uintptr_t pagestop = pagenum + count; + /* "unmaps" the page to make the address space location correspond + again to its underlying file offset (XXX later we should again + attempt to group together many calls to d_remap_file_pages() in + succession) */ + uintptr_t pagenum_in_file = NB_PAGES * STM_SEGMENT->segment_num + pagenum; + char *new_page = stm_object_pages + pagenum_in_file * 4096UL; + d_remap_file_pages(new_page, 4096, pagenum_in_file); + increment_total_allocated(4096); - for (; pagenum < pagestop; pagenum++) { - uint8_t prev = flag_page_private[pagenum]; - if (prev == PRIVATE_PAGE) { - if (pagenum > page_start_range) { - privatize_range(page_start_range, - pagenum - page_start_range, full); - } - page_start_range = pagenum + 1; - } - else { - assert(prev == SHARED_PAGE); - } - } + /* copy the content from the shared (segment 0) source */ + pagecopy(new_page, stm_object_pages + pagenum * 4096UL); - if (pagenum > page_start_range) { - privatize_range(page_start_range, - pagenum - page_start_range, full); - } + /* add this thread's 'pages_privatized' bit */ + uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); + pages_privatized[pagenum - PAGE_FLAG_START].by_segment |= bitmask; mutex_pages_unlock(); } +static void page_reshare(uintptr_t pagenum) +{ + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + pages_privatized[pagenum - PAGE_FLAG_START].by_segment = 0; + + long j, total = 0; + for (j = 0; j < NB_SEGMENTS; j++) { + if (ps.by_segment & (1 << j)) { + /* Page 'pagenum' is private in segment 'j + 1'. 
Reshare */ + char *segment_base = get_segment_base(j + 1); + + madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); + total -= 4096; + } + } + increment_total_allocated(total); +} + + #if 0 static bool is_fully_in_shared_pages(object_t *obj) { diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -1,48 +1,61 @@ -enum /* flag_page_private */ { - /* The page is not in use. Assume that each segment sees its own copy. */ - FREE_PAGE=0, +/* This handles pages of objects outside the nursery. Every page + has a "shared copy" and zero or more "private copies". - /* The page is shared by all segments. Each segment sees the same - physical page (the one that is within the segment 0 mmap address). */ - SHARED_PAGE, + The shared copy of a page is stored in the mmap at the file offset + corresponding to the segment 0 offset. Initially, accessing a page + from segment N remaps to segment 0. If the page is turned private, + then we "un-remap" it to its initial location. The 'pages_privatized' + global array records if a page is currently mapped to segment 0 + (shared page) or to its natural location (private page). - /* For only one range of pages at a time, around the call to - remap_file_pages() that un-shares the pages (SHARED -> PRIVATE). */ - REMAPPING_PAGE, + Note that this page manipulation logic uses remap_file_pages() to + fully hide its execution cost behind the CPU's memory management unit. + It should not be confused with the logic of tracking which objects + are old-and-committed, old-but-modified, overflow objects, and so on + (which works at the object granularity, not the page granularity). +*/ - /* Page is private for each segment. */ - PRIVATE_PAGE, +#define PAGE_FLAG_START END_NURSERY_PAGE +#define PAGE_FLAG_END NB_PAGES - /* gcpage.c: page contains objects that have been traced in the - segment > 0 */ - SEGMENT1_PAGE, +struct page_shared_s { +#if NB_SEGMENTS <= 8 + uint8_t by_segment; +#elif NB_SEGMENTS <= 16 + uint16_t by_segment; +#elif NB_SEGMENTS <= 32 + uint32_t by_segment; +#elif NB_SEGMENTS <= 64 + uint64_t by_segment; +#else +# error "NB_SEGMENTS > 64 not supported right now" +#endif }; -static uint8_t flag_page_private[NB_PAGES]; +static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; -static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); -//static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count); +static void page_privatize(uintptr_t pagenum); +static void page_reshare(uintptr_t pagenum); static void mutex_pages_lock(void); static void mutex_pages_unlock(void); +static bool _has_mutex_pages(void) __attribute__((unused)); static uint64_t increment_total_allocated(ssize_t add_or_remove); static bool is_major_collection_requested(void); static void force_major_collection_request(void); static void reset_major_collection_requested(void); -inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, - bool full) { - /* This is written a bit carefully so that a call with a constant - count == 1 will turn this loop into just one "if". 
*/ - while (flag_page_private[pagenum] == PRIVATE_PAGE) { - if (!--count) { - return; - } - pagenum++; - } - _pages_privatize(pagenum, count, full); +static inline bool is_private_page(long segnum, uintptr_t pagenum) +{ + assert(pagenum >= PAGE_FLAG_START); + uint64_t bitmask = 1UL << (segnum - 1); + return (pages_privatized[pagenum - PAGE_FLAG_START].by_segment & bitmask); } -/* static bool is_fully_in_shared_pages(object_t *obj); */ +static inline void page_check_and_reshare(uintptr_t pagenum) +{ + if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) + page_reshare(pagenum); +} diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -26,12 +26,15 @@ if (stm_object_pages == MAP_FAILED) stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); + /* The segment 0 is not used to run transactions, but to contain the + shared copy of the pages. We mprotect all pages before so that + accesses fail, up to and including the pages corresponding to the + nurseries of the other segments. */ + mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); -#ifdef STM_TESTS - stm_other_pages = segment_base; -#endif /* In each segment, the first page is where TLPREFIX'ed NULL accesses land. We mprotect it so that accesses fail. */ @@ -39,7 +42,7 @@ /* Fill the TLS page (page 1) with 0xDC, for debugging */ memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); - /* Make a "hole" at STM_PSEGMENT */ + /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); @@ -49,9 +52,10 @@ (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); + /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(i + 1 < 255); /* 255 is WL_VISITED in gcpage.c */ - pr->write_lock_num = i + 1; + assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ + pr->write_lock_num = i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->objects_pointing_to_nursery = NULL; @@ -62,7 +66,7 @@ pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_abort = tree_create(); - pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); + pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; } @@ -73,10 +77,6 @@ STM_SEGMENT->transaction_read_version never contains zero, so a null read marker means "not read" whatever the current transaction_read_version is. - - The creation markers are initially zero, which is correct: - it means "objects of this line of 256 bytes have not been - allocated by the current transaction." 
*/ setup_sync(); @@ -92,7 +92,7 @@ assert(!_has_mutex()); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); @@ -107,8 +107,6 @@ munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; - memset(flag_page_private, 0, sizeof(flag_page_private)); - teardown_core(); teardown_sync(); teardown_gcpage(); @@ -146,14 +144,14 @@ tl->prev = stm_all_thread_locals->prev; stm_all_thread_locals->prev->next = tl; stm_all_thread_locals->prev = tl; - num = tl->prev->associated_segment_num + 1; + num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. */ - num = num % NB_SEGMENTS; + num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -30,7 +30,7 @@ pthread_mutex_t global_mutex; pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ - uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ + uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread */ uint64_t global_time; }; char reserved[192]; @@ -124,12 +124,12 @@ { long i; restart: - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { if (can_abort) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), - or for a while. If we go past this call, then we + or wait for a while. If we go past this call, then we waited; in this case we have to re-check if no other thread is inevitable. */ inevitable_contention_management(i); @@ -152,7 +152,7 @@ assert(_is_tl_registered(tl)); int num = tl->associated_segment_num; - if (sync_ctl.in_use[num] == 0) { + if (sync_ctl.in_use1[num - 1] == 0) { /* fast-path: we can get the same segment number than the one we had before. The value stored in GS is still valid. */ #ifdef STM_TESTS @@ -165,10 +165,10 @@ } /* Look for the next free segment. If there is none, wait for the condition variable. */ - int i; - for (i = 0; i < NB_SEGMENTS; i++) { - num = (num + 1) % NB_SEGMENTS; - if (sync_ctl.in_use[num] == 0) { + int retries; + for (retries = 0; retries < NB_SEGMENTS; retries++) { + num = (num % NB_SEGMENTS) + 1; + if (sync_ctl.in_use1[num - 1] == 0) { /* we're getting 'num', a different number. 
*/ dprintf(("acquired different segment: %d->%d\n", tl->associated_segment_num, num)); tl->associated_segment_num = num; @@ -184,7 +184,7 @@ return false; got_num: - sync_ctl.in_use[num] = 1; + sync_ctl.in_use1[num - 1] = 1; assert(STM_SEGMENT->segment_num == num); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; @@ -208,8 +208,8 @@ assert(STM_SEGMENT->running_thread == tl); STM_SEGMENT->running_thread = NULL; - assert(sync_ctl.in_use[tl->associated_segment_num] == 1); - sync_ctl.in_use[tl->associated_segment_num] = 0; + assert(sync_ctl.in_use1[tl->associated_segment_num - 1] == 1); + sync_ctl.in_use1[tl->associated_segment_num - 1] = 0; } __attribute__((unused)) @@ -221,7 +221,7 @@ bool _stm_in_transaction(stm_thread_local_t *tl) { int num = tl->associated_segment_num; - assert(num < NB_SEGMENTS); + assert(1 <= num && num <= NB_SEGMENTS); return get_segment(num)->running_thread == tl; } @@ -260,12 +260,15 @@ { assert(_safe_points_requested == false); assert((_safe_points_requested = true, 1)); + assert(_has_mutex()); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_segment(i)->nursery_end == NURSERY_END) get_segment(i)->nursery_end = NSE_SIGPAUSE; } + assert(!pause_signalled); + pause_signalled = true; } static inline long count_other_threads_sp_running(void) @@ -276,7 +279,7 @@ long result = 0; int my_num = STM_SEGMENT->segment_num; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (i != my_num && get_priv_segment(i)->safe_point == SP_RUNNING) { assert(get_segment(i)->nursery_end <= _STM_NSE_SIGNAL_MAX); result++; @@ -287,11 +290,13 @@ static void remove_requests_for_safe_point(void) { + assert(pause_signalled); + pause_signalled = false; assert(_safe_points_requested == true); assert((_safe_points_requested = false, 1)); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { assert(get_segment(i)->nursery_end != NURSERY_END); if (get_segment(i)->nursery_end == NSE_SIGPAUSE) get_segment(i)->nursery_end = NURSERY_END; diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -30,3 +30,5 @@ static void wait_for_end_of_inevitable_transaction(bool can_abort); static void synchronize_all_threads(void); + +static bool pause_signalled; diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -32,23 +32,18 @@ ssize_t size = 16; stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); - if (flag_page_private[(uintptr_t)point_to_loc / 4096UL] == PRIVATE_PAGE) { - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - char *base = get_segment_base(i); /* two different segments */ - object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); - *ref_loc = value; - } - } - else { - *WEAKREF_PTR(weakref, size) = value; + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *base = get_segment_base(i); + object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); + *ref_loc = value; } } /***** Minor collection *****/ -static void stm_move_young_weakrefs() +static void stm_move_young_weakrefs(void) { /* The code relies on the fact that no weakref can be an old object weakly pointing to a young object. 
Indeed, weakrefs are immutable @@ -115,7 +110,7 @@ static void stm_visit_old_weakrefs(void) { long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct list_s *lst; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -87,7 +87,7 @@ #include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -uint8_t _stm_get_page_flag(uintptr_t index); +uintptr_t _stm_get_private_page(uintptr_t pagenum); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); @@ -107,6 +107,8 @@ object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); uint64_t _stm_total_allocated(void); +void _stm_mutex_pages_lock(void); +void _stm_mutex_pages_unlock(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 @@ -127,6 +129,14 @@ /* ==================== PUBLIC API ==================== */ +/* Number of segments (i.e. how many transactions can be executed in + parallel, in maximum). If you try to start transactions in more + threads than the number of segments, it will block, waiting for the + next segment to become free. +*/ +#define STM_NB_SEGMENTS 4 + + /* Structure of objects -------------------- diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -9,6 +9,7 @@ typedef ... object_t; typedef ... stm_jmpbuf_t; #define SIZEOF_MYOBJ ... +#define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... @@ -46,7 +47,7 @@ char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); void _stm_test_switch(stm_thread_local_t *tl); -uint8_t _stm_get_page_flag(uintptr_t index); +uintptr_t _stm_get_private_page(uintptr_t pagenum); int _stm_get_flags(object_t *obj); void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); @@ -87,6 +88,8 @@ void stm_collect(long level); uint64_t _stm_total_allocated(void); +void _stm_mutex_pages_lock(void); +void _stm_mutex_pages_unlock(void); long stm_identityhash(object_t *obj); long stm_id(object_t *obj); @@ -98,8 +101,6 @@ GC_N_SMALL_REQUESTS = 36 # from gcpage.c -SHARED_PAGE = 1 # from pages.h -PRIVATE_PAGE = 3 # from pages.h LARGE_MALLOC_OVERHEAD = 16 # from largemalloc.h lib = ffi.verify(''' @@ -262,6 +263,7 @@ HDR = lib.SIZEOF_MYOBJ assert HDR == 8 GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER +NB_SEGMENTS = lib.STM_NB_SEGMENTS class Conflict(Exception): @@ -361,8 +363,8 @@ def stm_major_collect(): lib.stm_collect(1) -def stm_get_page_flag(pagenum): - return lib._stm_get_page_flag(pagenum) +def stm_get_private_page(pagenum): + return lib._stm_get_private_page(pagenum) def stm_get_obj_size(o): return lib.stmcb_size_rounded_up(stm_get_real_address(o)) @@ -402,10 +404,11 @@ class BaseTest(object): + NB_THREADS = 2 def setup_method(self, meth): lib.stm_setup() - self.tls = [_allocate_thread_local(), _allocate_thread_local()] + self.tls = [_allocate_thread_local() for i in range(self.NB_THREADS)] self.current_thread = 0 def teardown_method(self, meth): diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -24,8 +24,8 @@ new = self.pop_root() assert len(stm_get_obj_pages(new)) == 2 - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [SHARED_PAGE]*2) + assert ([stm_get_private_page(p) for p in 
stm_get_obj_pages(new)] + == [0, 0]) assert not is_in_nursery(new) stm_write(new) @@ -33,11 +33,11 @@ # now proceed to write into the object in a new transaction self.start_transaction() - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [SHARED_PAGE]*2) + assert ([stm_get_private_page(p) for p in stm_get_obj_pages(new)] + == [0, 0]) stm_write(new) - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [PRIVATE_PAGE]*2) + assert ([bool(stm_get_private_page(p)) for p in stm_get_obj_pages(new)] + == [True, True]) # write to 2nd page of object!! wnew = stm_get_real_address(new) @@ -52,8 +52,8 @@ self.switch(0) self.abort_transaction() - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [PRIVATE_PAGE]*2) + assert ([bool(stm_get_private_page(p)) for p in stm_get_obj_pages(new)] + == [True, True]) def test_partial_alloced_pages(self): self.start_transaction() @@ -62,14 +62,14 @@ stm_minor_collect() new = self.pop_root() - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER stm_write(new) assert not (stm_get_flags(new) & GCFLAG_WRITE_BARRIER) self.commit_transaction() - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER self.start_transaction() @@ -78,7 +78,7 @@ stm_minor_collect() newer = self.pop_root() # 'new' is still in shared_page and committed - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(new)[0]) == 0 assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER # 'newer' is now part of the SHARED page with 'new', but # uncommitted, so no privatization has to take place: @@ -86,10 +86,10 @@ assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER stm_write(newer) # does not privatize assert not (stm_get_flags(newer) & GCFLAG_WRITE_BARRIER) - assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 self.commit_transaction() - assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == SHARED_PAGE + assert stm_get_private_page(stm_get_obj_pages(newer)[0]) == 0 assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER def test_major_collection(self): @@ -202,7 +202,6 @@ # self.start_transaction() stm_major_collect() - py.test.skip("XXX implement me") assert lib._stm_total_allocated() == 5000 + LMO # shared again def test_reshare_if_no_longer_modified_1(self): diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -14,6 +14,7 @@ lib.memset(self.rawmem, 0xcd, self.size) lib._stm_largemalloc_init_arena(self.rawmem, self.size) + lib._stm_mutex_pages_lock() # for this file def test_simple(self): d1 = lib._stm_large_malloc(7000) diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -54,7 +54,7 @@ e.g. maintains read/write sets. 
The state will be discarded on abort or pushed to other threads""" - def __init__(self, start_time): + def __init__(self, start_time, thread_num=None): self.read_set = set() self.write_set = set() self.values = {} @@ -63,6 +63,7 @@ self.objs_in_conflict = set() self.inevitable = False self.created_in_this_transaction = set() + self.thread_num = thread_num def get_old_modified(self): # returns only the ones that are modified and not from @@ -74,6 +75,8 @@ if objs_in_conflict is not None: self.objs_in_conflict |= objs_in_conflict self._must_abort = True + color = "\033[%dm" % (31 + self.thread_num % 6) + print >> sys.stderr, color + "# must abort: %r\033[0m" % (objs_in_conflict,) def check_must_abort(self): return self._must_abort @@ -180,10 +183,10 @@ r, int(ffi.cast("uintptr_t", ex.content[r])), stm_get_obj_size(ex.content[r]))) - def start_transaction(self): + def start_transaction(self, thread_num): assert self.transaction_state is None start_time = self.global_state.inc_and_get_global_time() - trs = TransactionState(start_time) + trs = TransactionState(start_time, thread_num) trs.update_from_committed( self.global_state.committed_transaction_state) self.transaction_state = trs @@ -291,6 +294,8 @@ if confl_set: contention_management(trs, other_trs, objs_in_conflict=confl_set) + if trs.check_must_abort(): + break if trs.check_must_abort(): self.ex.do('# write-read conflict: %s' % @@ -305,7 +310,7 @@ def op_start_transaction(ex, global_state, thread_state): - thread_state.start_transaction() + thread_state.start_transaction(ex.thread_num) # ex.do('self.start_transaction()') thread_state.reload_roots(ex) @@ -533,12 +538,13 @@ class TestRandom(BaseTest): + NB_THREADS = NB_SEGMENTS def test_fixed_16_bytes_objects(self, seed=1010): rnd = random.Random(seed) N_OBJECTS = 3 - N_THREADS = 2 + N_THREADS = self.NB_THREADS ex = Exec(self) ex.do("################################################################\n"*10) ex.do('# initialization') diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -279,7 +279,7 @@ stm_write(lp0) # privatize page with weakref in it too - assert stm_get_page_flag(stm_get_obj_pages(lp1)[0]) == PRIVATE_PAGE + assert stm_get_private_page(stm_get_obj_pages(lp1)[0]) != 0 assert stm_get_weakref(lp1) == lp0 self.commit_transaction() diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -7,7 +7,7 @@ char *filename = NULL; int interactive = 1; int i; - int num_threads = DEFAULT_NUM_THREADS; + int num_threads = STM_NB_SEGMENTS; for (i = 1; i < argc; ++i) { if (strcmp(argv[i], "--help") == 0) { diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -13,8 +13,6 @@ #endif -#define DEFAULT_NUM_THREADS 2 - extern __thread stm_thread_local_t stm_thread_local; struct DuObject_s { From noreply at buildbot.pypy.org Sun Mar 16 19:47:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 19:47:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/885ed3b0f6ee Message-ID: <20140316184752.C2C931C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69979:c561ed442ef2 Date: 2014-03-16 18:04 +0100 http://bitbucket.org/pypy/pypy/changeset/c561ed442ef2/ Log: import stmgc/885ed3b0f6ee diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ 
-ddbc16971682 +885ed3b0f6ee diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -236,7 +236,7 @@ uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { - uint8_t other_segment_num = prev_owner - 1; + uint8_t other_segment_num = prev_owner; assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -9,11 +9,34 @@ memset(write_locks, 0, sizeof(write_locks)); } +static void check_flag_write_barrier(object_t *obj) +{ + /* check that all copies of the object, apart from mine, have the + GCFLAG_WRITE_BARRIER. (a bit messy because it's possible that we + read a page in the middle of privatization by another thread) + */ +#ifndef NDEBUG + long i; + struct object_s *o1; + for (i = 0; i <= NB_SEGMENTS; i++) { + if (i == STM_SEGMENT->segment_num) + continue; + o1 = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); + if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) { + mutex_pages_lock(); /* try again... */ + if (!(o1->stm_flags & GCFLAG_WRITE_BARRIER)) + stm_fatalerror("missing GCFLAG_WRITE_BARRIER"); + mutex_pages_unlock(); + } + } +#endif +} void _stm_write_slowpath(object_t *obj) { assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); /* is this an object from the same transaction, outside the nursery? */ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == @@ -30,6 +53,8 @@ safepoints that may be issued in write_write_contention_management(). */ stm_read(obj); + /* XXX XXX XXX make the logic of write-locking objects optional! */ + /* claim the write-lock for this object. In case we're running the same transaction since a long while, the object can be already in 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, @@ -60,12 +85,12 @@ the common case. Otherwise, we need to compute it based on its location and size. 
*/ if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { - pages_privatize(first_page, 1, true); + page_privatize(first_page); } else { char *realobj; size_t obj_size; - uintptr_t end_page; + uintptr_t i, end_page; /* get the size of the object */ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); @@ -74,7 +99,9 @@ /* that's the page *following* the last page with the object */ end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - pages_privatize(first_page, end_page - first_page, true); + for (i = first_page; i < end_page; i++) { + page_privatize(i); + } } } else if (write_locks[lock_idx] == lock_num) { @@ -101,19 +128,21 @@ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); } - /* add the write-barrier-already-called flag ONLY if we succeeded in + /* check that we really have a private page */ + assert(is_private_page(STM_SEGMENT->segment_num, + ((uintptr_t)obj) / 4096)); + + /* check that so far all copies of the object have the flag */ + check_flag_write_barrier(obj); + + /* remove GCFLAG_WRITE_BARRIER, but only if we succeeded in getting the write-lock */ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - /* for sanity, check that all other segment copies of this object - still have the flag */ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - if (i != STM_SEGMENT->segment_num) - assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) - ->stm_flags & GCFLAG_WRITE_BARRIER); - } + /* for sanity, check again that all other segment copies of this + object still have the flag (so privatization worked) */ + check_flag_write_barrier(obj); } static void reset_transaction_read_version(void) @@ -194,59 +223,130 @@ /************************************************************/ -#if NB_SEGMENTS != 2 -# error "The logic in the functions below only works with two segments" -#endif static bool detect_write_read_conflicts(void) { - long remote_num = 1 - STM_SEGMENT->segment_num; - char *remote_base = get_segment_base(remote_num); - uint8_t remote_version = get_segment(remote_num)->transaction_read_version; + /* Detect conflicts of the form: we want to commit a write to an object, + but the same object was also read in a different thread. + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { - if (get_priv_segment(remote_num)->transaction_state == TS_NONE) - return false; /* no need to check */ + if (i == STM_SEGMENT->segment_num) + continue; - if (is_aborting_now(remote_num)) - return false; /* no need to check: is pending immediate abort */ + if (get_priv_segment(i)->transaction_state == TS_NONE) + continue; /* no need to check */ - LIST_FOREACH_R( - STM_PSEGMENT->modified_old_objects, - object_t * /*item*/, - ({ - if (was_read_remote(remote_base, item, remote_version)) { - /* A write-read conflict! */ - write_read_contention_management(remote_num); + if (is_aborting_now(i)) + continue; /* no need to check: is pending immediate abort */ - /* If we reach this point, we didn't abort, but maybe we - had to wait for the other thread to commit. If we - did, then we have to restart committing from our call - to synchronize_all_threads(). */ - return true; - } - })); + char *remote_base = get_segment_base(i); + uint8_t remote_version = get_segment(i)->transaction_read_version; + + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, + object_t * /*item*/, + ({ + if (was_read_remote(remote_base, item, remote_version)) { + /* A write-read conflict! 
*/ + write_read_contention_management(i); + + /* If we reach this point, we didn't abort, but maybe we + had to wait for the other thread to commit. If we + did, then we have to restart committing from our call + to synchronize_all_threads(). */ + return true; + } + })); + } return false; } -static void synchronize_overflow_object_now(object_t *obj) +static void copy_object_to_shared(object_t *obj, int source_segment_num) { + /* Only used by major GC. XXX There is a lot of code duplication + with synchronize_object_now() but I don't completely see how to + improve... + */ + assert(_has_mutex_pages()); assert(!_is_young(obj)); - assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); + + char *segment_base = get_segment(source_segment_num)->segment_base; + uintptr_t start = (uintptr_t)obj; + uintptr_t first_page = start / 4096UL; + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(segment_base, obj); + + if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + abort();//XXX WRITE THE FAST CASE + } + else { + ssize_t obj_size = stmcb_size_rounded_up(realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + + for (; first_page <= last_page; first_page++) { + + /* Copy the object into the shared page, if needed */ + if (is_private_page(source_segment_num, first_page)) { + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the + page's end */ + copy_size = 4096 - (start & 4095); + } + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + char *src = REAL_ADDRESS(segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + + start = (start + 4096) & ~4095; + } + } +} + +static void synchronize_object_now(object_t *obj) +{ + /* Copy around the version of 'obj' that lives in our own segment. + It is first copied into the shared pages, and then into other + segments' own private pages. + + This must be called with the mutex_pages_lock! + */ + assert(_has_mutex_pages()); + assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); uintptr_t start = (uintptr_t)obj; - uintptr_t end = start + obj_size; uintptr_t first_page = start / 4096UL; - uintptr_t last_page = (end - 1) / 4096UL; - do { - if (flag_page_private[first_page] != SHARED_PAGE) { - /* The page is a PRIVATE_PAGE. We need to diffuse this fragment - of our object from our own segment to all other segments. 
*/ + if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { + abort();//XXX WRITE THE FAST CASE + } + else { + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + long i, myself = STM_SEGMENT->segment_num; + + for (; first_page <= last_page; first_page++) { uintptr_t copy_size; if (first_page == last_page) { @@ -254,26 +354,50 @@ copy_size = end - start; } else { - /* this is a non-final fragment, going up to the page's end */ + /* this is a non-final fragment, going up to the + page's end */ copy_size = 4096 - (start & 4095); } - /* double-check that the result fits in one page */ assert(copy_size > 0); assert(copy_size + (start & 4095) <= 4096); - long i; + /* First copy the object into the shared page, if needed */ char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - for (i = 0; i < NB_SEGMENTS; i++) { - if (i != STM_SEGMENT->segment_num) { - char *dst = REAL_ADDRESS(get_segment_base(i), start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (is_private_page(myself, first_page)) { + if (copy_size == 4096) + pagecopy(dst, src); + else memcpy(dst, src, copy_size); + } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ + } + + for (i = 1; i <= NB_SEGMENTS; i++) { + if (i == myself) + continue; + + src = REAL_ADDRESS(stm_object_pages, start); + dst = REAL_ADDRESS(get_segment_base(i), start); + if (is_private_page(i, first_page)) { + /* The page is a private page. We need to diffuse this + fragment of object from the shared page to this private + page. */ + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ } } + + start = (start + 4096) & ~4095; } - - start = (start + 4096) & ~4095; - } while (first_page++ < last_page); + } } static void push_overflow_objects_from_privatized_pages(void) @@ -282,27 +406,15 @@ return; LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_overflow_object_now(item)); + synchronize_object_now(item)); } static void push_modified_to_other_segments(void) { - long remote_num = 1 - STM_SEGMENT->segment_num; - char *local_base = STM_SEGMENT->segment_base; - char *remote_base = get_segment_base(remote_num); - bool remote_active = - (get_priv_segment(remote_num)->transaction_state != TS_NONE && - get_segment(remote_num)->nursery_end != NSE_SIGABORT); - LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ - if (remote_active) { - assert(!was_read_remote(remote_base, item, - get_segment(remote_num)->transaction_read_version)); - } - /* clear the write-lock (note that this runs with all other threads paused, so no need to be careful about ordering) */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; @@ -314,11 +426,9 @@ minor_collection() */ assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); - /* copy the modified object to the other segment */ - char *src = REAL_ADDRESS(local_base, item); - char *dst = REAL_ADDRESS(remote_base, item); - ssize_t size = stmcb_size_rounded_up((struct object_s *)src); - memcpy(dst, src, size); + /* copy the object to the shared page, and to the other + private pages as needed */ + synchronize_object_now(item); })); list_clear(STM_PSEGMENT->modified_old_objects); @@ -369,10 +479,12 @@ major_collection_now_at_safe_point(); /* synchronize overflow objects living 
in privatized pages */ + mutex_pages_lock(); push_overflow_objects_from_privatized_pages(); /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); + mutex_pages_unlock(); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { @@ -490,6 +602,7 @@ static void abort_with_mutex(void) { + assert(_has_mutex()); dprintf(("~~~ ABORT\n")); switch (STM_PSEGMENT->transaction_state) { @@ -515,8 +628,11 @@ /* invoke the callbacks */ invoke_and_clear_callbacks_on_abort(); - if (STM_SEGMENT->nursery_end == NSE_SIGABORT) - STM_SEGMENT->nursery_end = NURSERY_END; /* done aborting */ + if (STM_SEGMENT->nursery_end == NSE_SIGABORT) { + /* done aborting */ + STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE + : NURSERY_END; + } _finish_transaction(); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -16,12 +16,12 @@ #define NB_PAGES (1500*256) // 1500MB -#define NB_SEGMENTS 2 +#define NB_SEGMENTS STM_NB_SEGMENTS #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define NB_NURSERY_PAGES (STM_GC_NURSERY/4) -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) +#define TOTAL_MEMORY (NB_PAGES * 4096UL * (1 + NB_SEGMENTS)) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE @@ -179,10 +179,6 @@ static char *stm_object_pages; static stm_thread_local_t *stm_all_thread_locals = NULL; -#ifdef STM_TESTS -static char *stm_other_pages; -#endif - static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; @@ -229,4 +225,5 @@ asm("/* workaround for llvm bug */"); } -static void synchronize_overflow_object_now(object_t *obj); +static void copy_object_to_shared(object_t *obj, int source_segment_num); +static void synchronize_object_now(object_t *obj); diff --git a/rpython/translator/stm/src_stm/stm/fprintcolor.h b/rpython/translator/stm/src_stm/stm/fprintcolor.h --- a/rpython/translator/stm/src_stm/stm/fprintcolor.h +++ b/rpython/translator/stm/src_stm/stm/fprintcolor.h @@ -10,7 +10,7 @@ #define dprintf(args) threadcolor_printf args static inline int dprintfcolor(void) { - return 31 + STM_SEGMENT->segment_num % 6; + return 31 + (STM_SEGMENT->segment_num + 5) % 6; } static int threadcolor_printf(const char *format, ...) 
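The gcpage.c hunk below operates on 'pages_privatized', the per-page bitmap that the pages.h/pages.c changes in these patches introduce in place of the old flag_page_private[] enum: bit (i - 1) of a page's entry is set when segment i holds a private copy of that page. As context, here is a minimal stand-alone sketch of that bookkeeping; the names mirror the patch, but NB_SEGMENTS, the page-range constants and the printf stub are placeholders only, and the real page_privatize()/page_reshare() additionally remap pages with remap_file_pages() and adjust total_allocated under mutex_pages_lock().

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NB_SEGMENTS      4     /* placeholder; the patch uses STM_NB_SEGMENTS */
#define PAGE_FLAG_START  10    /* placeholder; really END_NURSERY_PAGE */
#define PAGE_FLAG_END    20    /* placeholder; really NB_PAGES */

struct page_shared_s {
    uint8_t by_segment;        /* one bit per segment: bit (i - 1) for segment i */
};

static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START];

static void page_privatize(long segnum, uintptr_t pagenum)
{
    /* record that segment 'segnum' now has its own private copy of the page;
       the real page_privatize() takes only 'pagenum' and uses the running
       segment, and it also copies the shared page's contents */
    assert(pagenum >= PAGE_FLAG_START && pagenum < PAGE_FLAG_END);
    pages_privatized[pagenum - PAGE_FLAG_START].by_segment |=
        (uint8_t)(1U << (segnum - 1));
}

static bool is_private_page(long segnum, uintptr_t pagenum)
{
    /* same bit test as in pages.h: zero means "still mapped to the shared copy" */
    return (pages_privatized[pagenum - PAGE_FLAG_START].by_segment
            & (1U << (segnum - 1))) != 0;
}

static void page_check_and_reshare(uintptr_t pagenum)
{
    /* folds the patch's page_check_and_reshare()/page_reshare() pair into one
       stub: if any segment privatized this page, forget all private copies
       (the real code madvise()s and remaps each one back to the file offset) */
    struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START];
    if (ps->by_segment != 0) {
        printf("page %lu: resharing private copies, bitmap 0x%x\n",
               (unsigned long)pagenum, (unsigned)ps->by_segment);
        ps->by_segment = 0;
    }
}

int main(void)
{
    page_privatize(1, 12);
    page_privatize(3, 12);
    assert(is_private_page(1, 12) && is_private_page(3, 12));
    assert(!is_private_page(2, 12));          /* segment 2 still sees the shared copy */

    uintptr_t p;
    for (p = PAGE_FLAG_START; p < PAGE_FLAG_END; p++)
        page_check_and_reshare(p);            /* the loop major_reshare_pages() performs */

    assert(!is_private_page(1, 12));          /* everything is shared again */
    return 0;
}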
diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -193,129 +193,121 @@ /************************************************************/ +static uintptr_t object_last_page(object_t *obj) +{ + uintptr_t lastbyte; + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); -static inline void mark_single_flag_private(uintptr_t pagenum) -{ - if (flag_page_private[pagenum] == PRIVATE_PAGE) { - assert(pagenum >= END_NURSERY_PAGE); - assert(pagenum < NB_PAGES); - flag_page_private[pagenum] = SEGMENT1_PAGE; + if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + lastbyte = (uintptr_t)obj; } else { - assert(flag_page_private[pagenum] == SHARED_PAGE || - flag_page_private[pagenum] == SEGMENT1_PAGE); + /* get the size of the object */ + size_t obj_size = stmcb_size_rounded_up(realobj); + + /* that's the last byte within the object */ + lastbyte = ((uintptr_t)obj) + obj_size - 1; } + return lastbyte / 4096UL; } -static inline void mark_flag_page_private(object_t *obj, char *segment_base) +/* A macro that expands to: run the 'expression' for every page that + touches objects in the 'modified_old_objects' list. +*/ +#define BITOP(expression) \ + LIST_FOREACH_R( \ + get_priv_segment(segment_num)->modified_old_objects, \ + object_t * /* item */, \ + ({ \ + struct page_shared_s *ps; \ + uintptr_t pagenum = ((uintptr_t)item) / 4096UL; \ + uintptr_t count = object_last_page(item) - pagenum; \ + ps = &pages_privatized[pagenum - PAGE_FLAG_START]; \ + do { \ + expression; \ + ps++; \ + } while (count--); \ + })); + +static void major_hide_private_bits_for_modified_objects(long segment_num) { - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; - - if (LIKELY((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0)) { - mark_single_flag_private(first_page); - } - else { - char *realobj; - size_t obj_size; - uintptr_t end_page; - - /* get the size of the object */ - realobj = REAL_ADDRESS(segment_base, obj); - obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - - /* that's the page *following* the last page with the object */ - end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - - while (first_page < end_page) - mark_single_flag_private(first_page++); - } + uint64_t negativebitmask = ~(1 << (segment_num - 1)); +#ifndef NDEBUG + BITOP(assert((ps->by_segment & negativebitmask) != ps->by_segment)); +#endif + BITOP(ps->by_segment &= negativebitmask); } -static void major_reshare_pages_range(uintptr_t first_page, uintptr_t end_page) +static void major_restore_private_bits_for_modified_objects(long segment_num) { - uintptr_t i; - for (i = first_page; i < end_page; i++) { + uint64_t positivebitmask = 1 << (segment_num - 1); + BITOP(ps->by_segment |= positivebitmask); +} - switch (flag_page_private[i]) { - - case SEGMENT1_PAGE: - /* this page stays private after major collection */ - flag_page_private[i] = PRIVATE_PAGE; - break; - - case PRIVATE_PAGE:; - /* this page becomes shared again. No object in it was - traced belonging to a segment other than 0. - - XXX This is maybe a too-strict condition, but the more - general condition "all traced objects belong to the same - segment" has problems with large objects in segments > 0. - More precisely: we'd need to keep in the shared page the - content of the objects (from segment > 0), but also the - largemalloc's chunk data (stored in segment 0). 
- */ -#if NB_SEGMENTS != 2 -# error "limited to NB_SEGMENTS == 2" -#endif - char *ppage0 = get_segment_base(0) + i * 4096; - char *ppage1 = get_segment_base(1) + i * 4096; - - /* two cases for mapping pages to file-pages (fpages): - - (0->0, 1->1) - - (0->1, 1->0) - Distinguish which case it is by hacking a lot */ - - // 0->0,1->1 or 0->1,1->0 - /* map page 1 to fpage 0: */ - d_remap_file_pages(ppage1, 4096, i); - // 0->0,1->0 or 0->1,1->0 - - char oldvalue0 = *ppage0; - char oldvalue1 = *ppage1; - asm("":::"memory"); - *ppage0 = 1 + oldvalue1; - asm("":::"memory"); - char newvalue1 = *ppage1; - asm("":::"memory"); - *ppage0 = oldvalue0; - /* if we are in 0->0,1->0, old and new are different: - In this case we are done. We keep the largemalloc - data structure and objects of ppage0/fpage0 */ - if (oldvalue1 == newvalue1) { - // 0->1,1->0 - /* ppage0/fpage1 has the data structure that we want - in ppage1/fpage0, so we copy it */ - pagecopy(ppage1, ppage0); // copy from page0 to page1, - // i.e. from the underlying memory seg1 to seg0 - d_remap_file_pages(ppage0, 4096, i); - // 0->0,1->0 - } - flag_page_private[i] = SHARED_PAGE; - - increment_total_allocated(-4096 * (NB_SEGMENTS-1)); - break; - - case SHARED_PAGE: - break; /* stay shared */ - - default: - assert(!"unexpected flag_page_private"); - } - } -} +#undef BITOP static void major_reshare_pages(void) { /* re-share pages if possible. Each re-sharing decreases total_allocated by 4096. */ - major_reshare_pages_range( - END_NURSERY_PAGE, /* not the nursery! */ - (uninitialized_page_start - stm_object_pages) / 4096UL); - major_reshare_pages_range( - (uninitialized_page_stop - stm_object_pages) / 4096UL, - NB_PAGES); + + long i; + mutex_pages_lock(); + + for (i = 1; i <= NB_SEGMENTS; i++) { + /* The 'modified_old_objects' list gives the list of objects + whose pages need to remain private. We temporarily remove + these bits from 'pages_privatized', so that these pages will + be skipped by the loop below (and by copy_object_to_shared()). + */ + major_hide_private_bits_for_modified_objects(i); + + /* For each segment, push the current overflow objects from + private pages to the corresponding shared pages, if + necessary. The pages that we will re-share must contain this + data; otherwise, it would exist only in the private pages, + and get lost in the loop below. + */ + struct list_s *lst = get_priv_segment(i)->large_overflow_objects; + if (lst != NULL) { + LIST_FOREACH_R(lst, object_t *, copy_object_to_shared(item, i)); + } + } + + /* Now loop over all pages that are still in 'pages_privatized', + and re-share them. + */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (pagenum == endpagenum) + break; /* no pages in the 2nd section, so done too */ + } + + page_check_and_reshare(pagenum); + pagenum++; + } + + /* Done. Now 'pages_privatized' should be entirely zeroes. 
Restore + the previously-hidden bits + */ + for (i = 1; i <= NB_SEGMENTS; i++) { + major_restore_private_bits_for_modified_objects(i); + } + mutex_pages_unlock(); } + /************************************************************/ @@ -324,11 +316,6 @@ /* takes a normal pointer to a thread-local pointer to an object */ object_t *obj = *pobj; - if (obj == NULL || mark_visited_test_and_set(obj)) - return; /* already visited this object */ - - LIST_APPEND(mark_objects_to_trace, obj); - /* Note: this obj might be visited already, but from a different segment. We ignore this case and skip re-visiting the object anyway. The idea is that such an object is old (not from the @@ -339,6 +326,10 @@ segments and only needs visiting once. (It may actually be in a shared page, or maybe not.) */ + if (obj == NULL || mark_visited_test_and_set(obj)) + return; /* already visited this object */ + + LIST_APPEND(mark_objects_to_trace, obj); } static void mark_trace(object_t *obj, char *segment_base) @@ -346,13 +337,6 @@ assert(list_is_empty(mark_objects_to_trace)); while (1) { - - /* first, if we're not seeing segment 0, we must change the - flags in flag_page_private[] from PRIVATE_PAGE to - SEGMENT1_PAGE, which will mean "can't re-share" */ - if (segment_base != stm_object_pages && RESHARE_PAGES) - mark_flag_page_private(obj, segment_base); - /* trace into the object (the version from 'segment_base') */ struct object_s *realobj = (struct object_s *)REAL_ADDRESS(segment_base, obj); @@ -374,45 +358,33 @@ static void mark_visit_from_roots(void) { - if (testing_prebuilt_objs != NULL) { LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/, - mark_visit_object(item, get_segment_base(0))); + mark_visit_object(item, stm_object_pages)); } - /* Do the following twice, so that we trace first the objects from - segment 0, and then all others. XXX This is a hack to make it - more likely that we'll be able to re-share pages. */ + stm_thread_local_t *tl = stm_all_thread_locals; + do { + /* If 'tl' is currently running, its 'associated_segment_num' + field is the segment number that contains the correct + version of its overflowed objects. If not, then the + field is still some correct segment number, and it doesn't + matter which one we pick. */ + char *segment_base = get_segment_base(tl->associated_segment_num); - int must_be_zero; - for (must_be_zero = 1; must_be_zero >= 0; must_be_zero--) { + struct stm_shadowentry_s *current = tl->shadowstack; + struct stm_shadowentry_s *base = tl->shadowstack_base; + while (current-- != base) { + assert(current->ss != (object_t *)-1); + mark_visit_object(current->ss, segment_base); + } + mark_visit_object(tl->thread_local_obj, segment_base); - stm_thread_local_t *tl = stm_all_thread_locals; - do { - /* If 'tl' is currently running, its 'associated_segment_num' - field is the segment number that contains the correct - version of its overflowed objects. If not, then the - field is still some correct segment number, and it doesn't - matter which one we pick. 
*/ - char *segment_base = get_segment_base(tl->associated_segment_num); - - if (must_be_zero == (segment_base == get_segment_base(0))) { - - struct stm_shadowentry_s *current = tl->shadowstack; - struct stm_shadowentry_s *base = tl->shadowstack_base; - while (current-- != base) { - assert(current->ss != (object_t *)-1); - mark_visit_object(current->ss, segment_base); - } - mark_visit_object(tl->thread_local_obj, segment_base); - } - - tl = tl->next; - } while (tl != stm_all_thread_locals); - } + tl = tl->next; + } while (tl != stm_all_thread_locals); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state != TS_NONE) mark_visit_object( get_priv_segment(i)->threadlocal_at_start_of_transaction, @@ -423,20 +395,21 @@ static void mark_visit_from_modified_objects(void) { /* The modified objects are the ones that may exist in two different - versions: one in the segment that modified it, and another in - all other segments. */ + versions: one in the segment that modified it, and another in all + other segments. (It can also be more than two if we don't have + eager write locking.) + */ long i; - for (i = 0; i < NB_SEGMENTS; i++) { - char *base1 = get_segment_base(i); /* two different segments */ - char *base2 = get_segment_base(!i); + for (i = 1; i <= NB_SEGMENTS; i++) { + char *base = get_segment_base(i); LIST_FOREACH_R( get_priv_segment(i)->modified_old_objects, object_t * /*item*/, ({ mark_visited_test_and_set(item); - mark_trace(item, base1); - mark_trace(item, base2); + mark_trace(item, stm_object_pages); /* shared version */ + mark_trace(item, base); /* private version */ })); } } @@ -444,7 +417,7 @@ static void clean_up_segment_lists(void) { long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct list_s *lst; @@ -490,7 +463,9 @@ static void sweep_large_objects(void) { + mutex_pages_lock(); _stm_largemalloc_sweep(); + mutex_pages_unlock(); } static void clean_write_locks(void) @@ -506,11 +481,11 @@ memset(write_locks + lock2_idx, 0, sizeof(write_locks) - lock2_idx); } -static void major_set_write_locks(void) +static void major_restore_write_locks(void) { /* restore the write locks on the modified objects */ long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); LIST_FOREACH_R( @@ -536,6 +511,10 @@ dprintf((" | used before collection: %ld\n", (long)pages_ctl.total_allocated)); + /* reshare pages */ + if (RESHARE_PAGES) + major_reshare_pages(); + /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); @@ -549,15 +528,11 @@ clean_up_segment_lists(); /* sweeping */ - mutex_pages_lock(); - if (RESHARE_PAGES) - major_reshare_pages(); sweep_large_objects(); //sweep_uniform_pages(); - mutex_pages_unlock(); clean_write_locks(); - major_set_write_locks(); + major_restore_write_locks(); dprintf((" | used after collection: %ld\n", (long)pages_ctl.total_allocated)); diff --git a/rpython/translator/stm/src_stm/stm/gcpage.h b/rpython/translator/stm/src_stm/stm/gcpage.h --- a/rpython/translator/stm/src_stm/stm/gcpage.h +++ b/rpython/translator/stm/src_stm/stm/gcpage.h @@ -16,7 +16,7 @@ #define GC_MAJOR_COLLECT 1.82 /* re-share pages after major collections (1 or 0) */ -#define RESHARE_PAGES 0 +#define RESHARE_PAGES 1 diff --git a/rpython/translator/stm/src_stm/stm/misc.c b/rpython/translator/stm/src_stm/stm/misc.c --- 
a/rpython/translator/stm/src_stm/stm/misc.c +++ b/rpython/translator/stm/src_stm/stm/misc.c @@ -42,9 +42,10 @@ } #ifdef STM_TESTS -uint8_t _stm_get_page_flag(uintptr_t index) +uintptr_t _stm_get_private_page(uintptr_t pagenum) { - return flag_page_private[index]; + /* xxx returns 0 or 1 now */ + return is_private_page(STM_SEGMENT->segment_num, pagenum); } long _stm_count_modified_old_objects(void) @@ -80,4 +81,14 @@ mutex_pages_unlock(); return result; } + +void _stm_mutex_pages_lock(void) +{ + mutex_pages_lock(); +} + +void _stm_mutex_pages_unlock(void) +{ + mutex_pages_unlock(); +} #endif diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -27,7 +27,7 @@ _stm_nursery_start = NURSERY_START; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { get_segment(i)->nursery_current = (stm_char *)NURSERY_START; get_segment(i)->nursery_end = NURSERY_END; } @@ -199,8 +199,11 @@ WRITE_BARRIER flag and traced into it to fix its content); or add the object to 'large_overflow_objects'. */ - if (STM_PSEGMENT->minor_collect_will_commit_now) - synchronize_overflow_object_now(obj); + if (STM_PSEGMENT->minor_collect_will_commit_now) { + mutex_pages_lock(); + synchronize_object_now(obj); + mutex_pages_unlock(); + } else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } @@ -379,7 +382,7 @@ _stm_nursery_start = NURSERY_END - free_count; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if ((uintptr_t)get_segment(i)->nursery_current < _stm_nursery_start) get_segment(i)->nursery_current = (stm_char *)_stm_nursery_start; } @@ -412,7 +415,7 @@ int original_num = STM_SEGMENT->segment_num; long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); if (MINOR_NOTHING_TO_DO(pseg)) /*TS_NONE segments have NOTHING_TO_DO*/ continue; diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -26,6 +26,7 @@ static void teardown_pages(void) { memset(&pages_ctl, 0, sizeof(pages_ctl)); + memset(pages_privatized, 0, sizeof(pages_privatized)); } static void mutex_pages_lock(void) @@ -40,7 +41,6 @@ __sync_lock_release(&pages_ctl.mutex_pages); } -__attribute__((unused)) static bool _has_mutex_pages(void) { return pages_ctl.mutex_pages != 0; @@ -48,6 +48,7 @@ static uint64_t increment_total_allocated(ssize_t add_or_remove) { + assert(_has_mutex_pages()); pages_ctl.total_allocated += add_or_remove; if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) @@ -103,100 +104,64 @@ segment 0. 
*/ uintptr_t i; assert(_has_mutex_pages()); - for (i = 1; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, count * 4096UL, pagenum); } - for (i = 0; i < count; i++) - flag_page_private[pagenum + i] = SHARED_PAGE; } -#if 0 -static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count) +static void page_privatize(uintptr_t pagenum) { - /* Same as pages_initialize_shared(), but tries hard to minimize the - total number of pages that remap_file_pages() must handle, by - fragmenting calls as much as possible (the overhead of one system - call appears smaller as the overhead per page). */ - uintptr_t start, i = 0; - while (i < count) { - if (flag_page_private[pagenum + (i++)] == SHARED_PAGE) - continue; - start = i; /* first index of a private page */ - while (1) { - i++; - if (i == count || flag_page_private[pagenum + i] == SHARED_PAGE) - break; - } - pages_initialize_shared(pagenum + start, i - start); - } -} -#endif - -static void privatize_range(uintptr_t pagenum, uintptr_t count, bool full) -{ - ssize_t pgoff1 = pagenum; - ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * STM_SEGMENT->segment_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - STM_SEGMENT->segment_num); - - void *localpg = stm_object_pages + localpgoff * 4096UL; - void *otherpg = stm_object_pages + otherpgoff * 4096UL; - - memset(flag_page_private + pagenum, REMAPPING_PAGE, count); - d_remap_file_pages(localpg, count * 4096, pgoff2); - uintptr_t i; - if (full) { - for (i = 0; i < count; i++) { - pagecopy(localpg + 4096 * i, otherpg + 4096 * i); - } - } - else { - pagecopy(localpg, otherpg); - if (count > 1) - pagecopy(localpg + 4096 * (count-1), otherpg + 4096 * (count-1)); - } - write_fence(); - memset(flag_page_private + pagenum, PRIVATE_PAGE, count); - increment_total_allocated(4096 * count); -} - -static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) -{ - /* narrow the range of pages to privatize from the end: */ - while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { - if (!--count) - return; + if (is_private_page(STM_SEGMENT->segment_num, pagenum)) { + /* the page is already privatized */ + return; } + /* lock, to prevent concurrent threads from looking up this thread's + 'pages_privatized' bits in parallel */ mutex_pages_lock(); - uintptr_t page_start_range = pagenum; - uintptr_t pagestop = pagenum + count; + /* "unmaps" the page to make the address space location correspond + again to its underlying file offset (XXX later we should again + attempt to group together many calls to d_remap_file_pages() in + succession) */ + uintptr_t pagenum_in_file = NB_PAGES * STM_SEGMENT->segment_num + pagenum; + char *new_page = stm_object_pages + pagenum_in_file * 4096UL; + d_remap_file_pages(new_page, 4096, pagenum_in_file); + increment_total_allocated(4096); - for (; pagenum < pagestop; pagenum++) { - uint8_t prev = flag_page_private[pagenum]; - if (prev == PRIVATE_PAGE) { - if (pagenum > page_start_range) { - privatize_range(page_start_range, - pagenum - page_start_range, full); - } - page_start_range = pagenum + 1; - } - else { - assert(prev == SHARED_PAGE); - } - } + /* copy the content from the shared (segment 0) source */ + pagecopy(new_page, stm_object_pages + pagenum * 4096UL); - if (pagenum > page_start_range) { - privatize_range(page_start_range, - pagenum - page_start_range, full); - } + /* add this thread's 'pages_privatized' bit */ + 
uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); + pages_privatized[pagenum - PAGE_FLAG_START].by_segment |= bitmask; mutex_pages_unlock(); } +static void page_reshare(uintptr_t pagenum) +{ + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + pages_privatized[pagenum - PAGE_FLAG_START].by_segment = 0; + + long j, total = 0; + for (j = 0; j < NB_SEGMENTS; j++) { + if (ps.by_segment & (1 << j)) { + /* Page 'pagenum' is private in segment 'j + 1'. Reshare */ + char *segment_base = stm_object_pages + NB_PAGES * 4096UL * (j+1); + + madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); + total -= 4096; + } + } + increment_total_allocated(total); +} + + #if 0 static bool is_fully_in_shared_pages(object_t *obj) { diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -1,49 +1,62 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -enum /* flag_page_private */ { - /* The page is not in use. Assume that each segment sees its own copy. */ - FREE_PAGE=0, +/* This handles pages of objects outside the nursery. Every page + has a "shared copy" and zero or more "private copies". - /* The page is shared by all segments. Each segment sees the same - physical page (the one that is within the segment 0 mmap address). */ - SHARED_PAGE, + The shared copy of a page is stored in the mmap at the file offset + corresponding to the segment 0 offset. Initially, accessing a page + from segment N remaps to segment 0. If the page is turned private, + then we "un-remap" it to its initial location. The 'pages_privatized' + global array records if a page is currently mapped to segment 0 + (shared page) or to its natural location (private page). - /* For only one range of pages at a time, around the call to - remap_file_pages() that un-shares the pages (SHARED -> PRIVATE). */ - REMAPPING_PAGE, + Note that this page manipulation logic uses remap_file_pages() to + fully hide its execution cost behind the CPU's memory management unit. + It should not be confused with the logic of tracking which objects + are old-and-committed, old-but-modified, overflow objects, and so on + (which works at the object granularity, not the page granularity). +*/ - /* Page is private for each segment. 
*/ - PRIVATE_PAGE, +#define PAGE_FLAG_START END_NURSERY_PAGE +#define PAGE_FLAG_END NB_PAGES - /* gcpage.c: page contains objects that have been traced in the - segment > 0 */ - SEGMENT1_PAGE, +struct page_shared_s { +#if NB_SEGMENTS <= 8 + uint8_t by_segment; +#elif NB_SEGMENTS <= 16 + uint16_t by_segment; +#elif NB_SEGMENTS <= 32 + uint32_t by_segment; +#elif NB_SEGMENTS <= 64 + uint64_t by_segment; +#else +# error "NB_SEGMENTS > 64 not supported right now" +#endif }; -static uint8_t flag_page_private[NB_PAGES]; +static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; -static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); -//static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count); +static void page_privatize(uintptr_t pagenum); +static void page_reshare(uintptr_t pagenum); static void mutex_pages_lock(void); static void mutex_pages_unlock(void); +static bool _has_mutex_pages(void) __attribute__((unused)); static uint64_t increment_total_allocated(ssize_t add_or_remove); static bool is_major_collection_requested(void); static void force_major_collection_request(void); static void reset_major_collection_requested(void); -inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, - bool full) { - /* This is written a bit carefully so that a call with a constant - count == 1 will turn this loop into just one "if". */ - while (flag_page_private[pagenum] == PRIVATE_PAGE) { - if (!--count) { - return; - } - pagenum++; - } - _pages_privatize(pagenum, count, full); +static inline bool is_private_page(long segnum, uintptr_t pagenum) +{ + assert(pagenum >= PAGE_FLAG_START); + uint64_t bitmask = 1UL << (segnum - 1); + return (pages_privatized[pagenum - PAGE_FLAG_START].by_segment & bitmask); } -/* static bool is_fully_in_shared_pages(object_t *obj); */ +static inline void page_check_and_reshare(uintptr_t pagenum) +{ + if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) + page_reshare(pagenum); +} diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -27,12 +27,15 @@ if (stm_object_pages == MAP_FAILED) stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); + /* The segment 0 is not used to run transactions, but to contain the + shared copy of the pages. We mprotect all pages before so that + accesses fail, up to and including the pages corresponding to the + nurseries of the other segments. */ + mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); -#ifdef STM_TESTS - stm_other_pages = segment_base; -#endif /* In each segment, the first page is where TLPREFIX'ed NULL accesses land. We mprotect it so that accesses fail. 
*/ @@ -40,7 +43,7 @@ /* Fill the TLS page (page 1) with 0xDC, for debugging */ memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); - /* Make a "hole" at STM_PSEGMENT */ + /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); @@ -50,9 +53,10 @@ (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); + /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(i + 1 < 255); /* 255 is WL_VISITED in gcpage.c */ - pr->write_lock_num = i + 1; + assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ + pr->write_lock_num = i; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->objects_pointing_to_nursery = NULL; @@ -63,7 +67,7 @@ pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); pr->callbacks_on_abort = tree_create(); - pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); + pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; } @@ -74,10 +78,6 @@ STM_SEGMENT->transaction_read_version never contains zero, so a null read marker means "not read" whatever the current transaction_read_version is. - - The creation markers are initially zero, which is correct: - it means "objects of this line of 256 bytes have not been - allocated by the current transaction." */ setup_sync(); @@ -93,7 +93,7 @@ assert(!_has_mutex()); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); @@ -108,8 +108,6 @@ munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; - memset(flag_page_private, 0, sizeof(flag_page_private)); - teardown_core(); teardown_sync(); teardown_gcpage(); @@ -147,14 +145,14 @@ tl->prev = stm_all_thread_locals->prev; stm_all_thread_locals->prev->next = tl; stm_all_thread_locals->prev = tl; - num = tl->prev->associated_segment_num + 1; + num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own numbers automatically. */ - num = num % NB_SEGMENTS; + num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -31,7 +31,7 @@ pthread_mutex_t global_mutex; pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ - uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ + uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread */ uint64_t global_time; }; char reserved[192]; @@ -61,7 +61,7 @@ stm_fatalerror("cond destroy: %m\n"); } - memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); + memset(&sync_ctl, 0, sizeof(sync_ctl)); } #ifndef NDEBUG @@ -125,12 +125,12 @@ { long i; restart: - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { if (can_abort) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), - or for a while. If we go past this call, then we + or wait for a while. 
If we go past this call, then we waited; in this case we have to re-check if no other thread is inevitable. */ inevitable_contention_management(i); @@ -153,7 +153,7 @@ assert(_is_tl_registered(tl)); int num = tl->associated_segment_num; - if (sync_ctl.in_use[num] == 0) { + if (sync_ctl.in_use1[num - 1] == 0) { /* fast-path: we can get the same segment number than the one we had before. The value stored in GS is still valid. */ #ifdef STM_TESTS @@ -166,10 +166,10 @@ } /* Look for the next free segment. If there is none, wait for the condition variable. */ - int i; - for (i = 0; i < NB_SEGMENTS; i++) { - num = (num + 1) % NB_SEGMENTS; - if (sync_ctl.in_use[num] == 0) { + int retries; + for (retries = 0; retries < NB_SEGMENTS; retries++) { + num = (num % NB_SEGMENTS) + 1; + if (sync_ctl.in_use1[num - 1] == 0) { /* we're getting 'num', a different number. */ dprintf(("acquired different segment: %d->%d\n", tl->associated_segment_num, num)); tl->associated_segment_num = num; @@ -185,7 +185,7 @@ return false; got_num: - sync_ctl.in_use[num] = 1; + sync_ctl.in_use1[num - 1] = 1; assert(STM_SEGMENT->segment_num == num); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; @@ -209,8 +209,8 @@ assert(STM_SEGMENT->running_thread == tl); STM_SEGMENT->running_thread = NULL; - assert(sync_ctl.in_use[tl->associated_segment_num] == 1); - sync_ctl.in_use[tl->associated_segment_num] = 0; + assert(sync_ctl.in_use1[tl->associated_segment_num - 1] == 1); + sync_ctl.in_use1[tl->associated_segment_num - 1] = 0; } __attribute__((unused)) @@ -222,7 +222,7 @@ bool _stm_in_transaction(stm_thread_local_t *tl) { int num = tl->associated_segment_num; - assert(num < NB_SEGMENTS); + assert(1 <= num && num <= NB_SEGMENTS); return get_segment(num)->running_thread == tl; } @@ -261,12 +261,15 @@ { assert(_safe_points_requested == false); assert((_safe_points_requested = true, 1)); + assert(_has_mutex()); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (get_segment(i)->nursery_end == NURSERY_END) get_segment(i)->nursery_end = NSE_SIGPAUSE; } + assert(!pause_signalled); + pause_signalled = true; } static inline long count_other_threads_sp_running(void) @@ -277,7 +280,7 @@ long result = 0; int my_num = STM_SEGMENT->segment_num; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { if (i != my_num && get_priv_segment(i)->safe_point == SP_RUNNING) { assert(get_segment(i)->nursery_end <= _STM_NSE_SIGNAL_MAX); result++; @@ -288,11 +291,13 @@ static void remove_requests_for_safe_point(void) { + assert(pause_signalled); + pause_signalled = false; assert(_safe_points_requested == true); assert((_safe_points_requested = false, 1)); long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { assert(get_segment(i)->nursery_end != NURSERY_END); if (get_segment(i)->nursery_end == NSE_SIGPAUSE) get_segment(i)->nursery_end = NURSERY_END; diff --git a/rpython/translator/stm/src_stm/stm/sync.h b/rpython/translator/stm/src_stm/stm/sync.h --- a/rpython/translator/stm/src_stm/stm/sync.h +++ b/rpython/translator/stm/src_stm/stm/sync.h @@ -31,3 +31,5 @@ static void wait_for_end_of_inevitable_transaction(bool can_abort); static void synchronize_all_threads(void); + +static bool pause_signalled; diff --git a/rpython/translator/stm/src_stm/stm/weakref.c b/rpython/translator/stm/src_stm/stm/weakref.c --- a/rpython/translator/stm/src_stm/stm/weakref.c +++ b/rpython/translator/stm/src_stm/stm/weakref.c @@ -33,23 +33,18 @@ ssize_t size = 16; 
stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size); - if (flag_page_private[(uintptr_t)point_to_loc / 4096UL] == PRIVATE_PAGE) { - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - char *base = get_segment_base(i); /* two different segments */ - object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); - *ref_loc = value; - } - } - else { - *WEAKREF_PTR(weakref, size) = value; + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *base = get_segment_base(i); + object_t ** ref_loc = (object_t **)REAL_ADDRESS(base, point_to_loc); + *ref_loc = value; } } /***** Minor collection *****/ -static void stm_move_young_weakrefs() +static void stm_move_young_weakrefs(void) { /* The code relies on the fact that no weakref can be an old object weakly pointing to a young object. Indeed, weakrefs are immutable @@ -116,7 +111,7 @@ static void stm_visit_old_weakrefs(void) { long i; - for (i = 0; i < NB_SEGMENTS; i++) { + for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); struct list_s *lst; diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -88,7 +88,7 @@ #include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -uint8_t _stm_get_page_flag(uintptr_t index); +uintptr_t _stm_get_private_page(uintptr_t pagenum); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); @@ -108,6 +108,8 @@ object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); uint64_t _stm_total_allocated(void); +void _stm_mutex_pages_lock(void); +void _stm_mutex_pages_unlock(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 @@ -128,6 +130,12 @@ /* ==================== PUBLIC API ==================== */ +/* Number of segments (i.e. how many threads can be executed in + parallel, in maximum). +*/ +#define STM_NB_SEGMENTS 4 + + /* Structure of objects -------------------- From noreply at buildbot.pypy.org Sun Mar 16 20:59:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 20:59:09 +0100 (CET) Subject: [pypy-commit] stmgc default: Make this comment and reality take a step toward each other Message-ID: <20140316195909.97D651C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1058:543aeede2318 Date: 2014-03-16 20:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/543aeede2318/ Log: Make this comment and reality take a step toward each other diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -18,13 +18,13 @@ C_FILES = ../stmgc.c ../stm/*.c -# note that 'build' is optimized but still contains all asserts +# note that 'build' is partially optimized but still contains all asserts debug-%: %.c ${H_FILES} ${C_FILES} clang -I.. -pthread -DSTM_DEBUGPRINT -DSTM_GC_NURSERY=128 -g -O0 \ $< -o debug-$* -Wall -Werror ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -DSTM_GC_NURSERY=128 -g -O0 \ + clang -I.. -pthread -DSTM_GC_NURSERY=128 -g -O1 \ $< -o build-$* -Wall -Werror ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} From noreply at buildbot.pypy.org Sun Mar 16 21:18:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Mar 2014 21:18:02 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: os.fork()! 
Message-ID: <20140316201802.DB0401C011F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r69980:4891028503f5 Date: 2014-03-16 21:17 +0100 http://bitbucket.org/pypy/pypy/changeset/4891028503f5/ Log: os.fork()! diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -28,6 +28,10 @@ ------------------------------------------------------------ +os.fork()! + +------------------------------------------------------------ + From noreply at buildbot.pypy.org Sun Mar 16 22:40:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 22:40:22 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140316214022.A49D01C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69981:eb5cbd7cbd8b Date: 2014-03-16 13:14 -0700 http://bitbucket.org/pypy/pypy/changeset/eb5cbd7cbd8b/ Log: cleanups diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -547,20 +547,18 @@ track_allocation = d.pop('track_allocation', True) if d: raise UnsupportedMallocFlags(d) - TYPE = op.args[0].value if zero: name += '_zero' if add_memory_pressure: name += '_add_memory_pressure' if not track_allocation: name += '_no_track_allocation' + TYPE = op.args[0].value op1 = self.prepare_builtin_call(op, name, args, (TYPE,), TYPE) - if name == 'raw_malloc_varsize': - ITEMTYPE = op.args[0].value.OF - if ITEMTYPE == lltype.Char: - return self._handle_oopspec_call(op1, args, - EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, - EffectInfo.EF_CAN_RAISE) + if name == 'raw_malloc_varsize' and TYPE.OF == lltype.Char: + return self._handle_oopspec_call(op1, args, + EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, + EffectInfo.EF_CAN_RAISE) return self.rewrite_op_direct_call(op1) def rewrite_op_malloc_varsize(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py b/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py --- a/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py @@ -17,8 +17,7 @@ ( 4, 2, 'descr2', 'two'), ( 8, 4, 'descr3', 'three'), (12, 2, 'descr4', 'four'), - ] - # + ] def test_write_value_update(): buf = RawBuffer(FakeCPU()) @@ -28,7 +27,7 @@ assert buf._get_memory() == [ ( 0, 4, 'descr', 'ONE'), ( 4, 2, 'descr', 'two'), - ] + ] def test_write_value_invalid_length(): buf = RawBuffer(FakeCPU()) @@ -38,7 +37,6 @@ with py.test.raises(InvalidRawWrite): buf.write_value(0, 4, 'descr2', 'two') - def test_write_value_overlapping_next(): buf = RawBuffer(FakeCPU()) buf.write_value(0, 4, 'descr', 'one') diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -1,7 +1,8 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, - free_raw_storage, raw_storage_getitem) + free_raw_storage, raw_storage_getitem) + class RawMemTests(object): def test_cast_void_ptr(self): From noreply at buildbot.pypy.org Sun Mar 16 22:40:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 22:40:23 +0100 (CET) Subject: [pypy-commit] pypy virtual-raw-store-load: optimize raw_load/raw_store like arrayitem_raw Message-ID: <20140316214023.D60EC1C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns 
Branch: virtual-raw-store-load Changeset: r69982:e165cc881e89 Date: 2014-03-16 14:03 -0700 http://bitbucket.org/pypy/pypy/changeset/e165cc881e89/ Log: optimize raw_load/raw_store like arrayitem_raw diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1894,6 +1894,23 @@ """ self.optimize_loop(ops, expected) + def test_virtual_raw_store_load(self): + ops = """ + [i1] + i0 = call('malloc', 10, descr=raw_malloc_descr) + raw_store(i0, 0, i1, descr=rawarraydescr) + i2 = raw_load(i0, 0, descr=rawarraydescr) + i3 = int_add(i1, i2) + call('free', i0, descr=raw_free_descr) + jump(i3) + """ + expected = """ + [i1] + i2 = int_add(i1, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getfield_1(self): ops = """ [p1, p2] diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -788,6 +788,8 @@ value.ensure_nonnull() self.emit_operation(op) + optimize_RAW_LOAD = optimize_GETARRAYITEM_RAW + def optimize_SETARRAYITEM_RAW(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): @@ -805,6 +807,8 @@ value.ensure_nonnull() self.emit_operation(op) + optimize_RAW_STORE = optimize_SETARRAYITEM_RAW + def optimize_GETINTERIORFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): From noreply at buildbot.pypy.org Sun Mar 16 22:40:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 22:40:25 +0100 (CET) Subject: [pypy-commit] pypy virtual-raw-store-load: raw_load/raw_store take offset not index Message-ID: <20140316214025.177881C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: virtual-raw-store-load Changeset: r69983:a56510bcbcc8 Date: 2014-03-16 14:30 -0700 http://bitbucket.org/pypy/pypy/changeset/a56510bcbcc8/ Log: raw_load/raw_store take offset not index diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1894,7 +1894,7 @@ """ self.optimize_loop(ops, expected) - def test_virtual_raw_store_load(self): + def test_virtual_raw_store_raw_load(self): ops = """ [i1] i0 = call('malloc', 10, descr=raw_malloc_descr) @@ -1911,6 +1911,23 @@ """ self.optimize_loop(ops, expected) + def test_virtual_raw_store_getarrayitem_raw(self): + ops = """ + [f1] + i0 = call('malloc', 16, descr=raw_malloc_descr) + raw_store(i0, 8, f1, descr=rawarraydescr_float) + f2 = getarrayitem_raw(i0, 1, descr=rawarraydescr_float) + f3 = float_add(f1, f2) + call('free', i0, descr=raw_free_descr) + jump(f3) + """ + expected = """ + [f1] + f2 = float_add(f1, f1) + jump(f2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getfield_1(self): ops = """ [p1, p2] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -226,6 +226,8 @@ hints={'nolength': True})) rawarraydescr_char = cpu.arraydescrof(lltype.Array(lltype.Char, hints={'nolength': True})) + 
rawarraydescr_float = cpu.arraydescrof(lltype.Array(lltype.Float, + hints={'nolength': True})) fc_array = lltype.GcArray( lltype.Struct( diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -788,8 +788,6 @@ value.ensure_nonnull() self.emit_operation(op) - optimize_RAW_LOAD = optimize_GETARRAYITEM_RAW - def optimize_SETARRAYITEM_RAW(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): @@ -807,7 +805,46 @@ value.ensure_nonnull() self.emit_operation(op) - optimize_RAW_STORE = optimize_SETARRAYITEM_RAW + def _unpack_raw_load_store_op(self, op, offsetbox): + offset = offsetbox.getint() + cpu = self.optimizer.cpu + descr = op.getdescr() + itemsize = cpu.unpack_arraydescr_size(descr)[1] + return offset, itemsize, descr + + def optimize_RAW_LOAD(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + try: + itemvalue = value.getitem_raw(offset, itemsize, descr) + self.make_equal_to(op.result, itemvalue) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + return + value.ensure_nonnull() + self.emit_operation(op) + + def optimize_RAW_STORE(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + itemvalue = self.getvalue(op.getarg(2)) + try: + value.setitem_raw(offset, itemsize, descr, itemvalue) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + return + value.ensure_nonnull() + self.emit_operation(op) def optimize_GETINTERIORFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) From noreply at buildbot.pypy.org Sun Mar 16 22:40:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 22:40:26 +0100 (CET) Subject: [pypy-commit] pypy virtual-raw-store-load: cleanup Message-ID: <20140316214026.4E6391C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: virtual-raw-store-load Changeset: r69984:37d2c78f5076 Date: 2014-03-16 14:39 -0700 http://bitbucket.org/pypy/pypy/changeset/37d2c78f5076/ Log: cleanup diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -779,11 +779,12 @@ offset, itemsize, descr = self._unpack_arrayitem_raw_op(op, indexbox) try: itemvalue = value.getitem_raw(offset, itemsize, descr) - self.make_equal_to(op.result, itemvalue) except InvalidRawOperation: box = value.force_box(self) op.setarg(0, box) self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) return value.ensure_nonnull() self.emit_operation(op) @@ -820,11 +821,12 @@ offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) try: itemvalue = value.getitem_raw(offset, itemsize, descr) - self.make_equal_to(op.result, itemvalue) except InvalidRawOperation: box = value.force_box(self) op.setarg(0, box) self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) return value.ensure_nonnull() self.emit_operation(op) From noreply at buildbot.pypy.org 
Sun Mar 16 23:20:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 23:20:19 +0100 (CET) Subject: [pypy-commit] pypy virtual-raw-store-load: close branch for merging Message-ID: <20140316222019.D83031C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: virtual-raw-store-load Changeset: r69985:82624ea198f6 Date: 2014-03-16 18:11 -0400 http://bitbucket.org/pypy/pypy/changeset/82624ea198f6/ Log: close branch for merging From noreply at buildbot.pypy.org Sun Mar 16 23:20:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 23:20:21 +0100 (CET) Subject: [pypy-commit] pypy default: merge virtual-raw-store-load Message-ID: <20140316222021.2A2D41C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69986:9fca5c72c344 Date: 2014-03-16 18:11 -0400 http://bitbucket.org/pypy/pypy/changeset/9fca5c72c344/ Log: merge virtual-raw-store-load diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1894,6 +1894,40 @@ """ self.optimize_loop(ops, expected) + def test_virtual_raw_store_raw_load(self): + ops = """ + [i1] + i0 = call('malloc', 10, descr=raw_malloc_descr) + raw_store(i0, 0, i1, descr=rawarraydescr) + i2 = raw_load(i0, 0, descr=rawarraydescr) + i3 = int_add(i1, i2) + call('free', i0, descr=raw_free_descr) + jump(i3) + """ + expected = """ + [i1] + i2 = int_add(i1, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_virtual_raw_store_getarrayitem_raw(self): + ops = """ + [f1] + i0 = call('malloc', 16, descr=raw_malloc_descr) + raw_store(i0, 8, f1, descr=rawarraydescr_float) + f2 = getarrayitem_raw(i0, 1, descr=rawarraydescr_float) + f3 = float_add(f1, f2) + call('free', i0, descr=raw_free_descr) + jump(f3) + """ + expected = """ + [f1] + f2 = float_add(f1, f1) + jump(f2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getfield_1(self): ops = """ [p1, p2] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -226,6 +226,8 @@ hints={'nolength': True})) rawarraydescr_char = cpu.arraydescrof(lltype.Array(lltype.Char, hints={'nolength': True})) + rawarraydescr_float = cpu.arraydescrof(lltype.Array(lltype.Float, + hints={'nolength': True})) fc_array = lltype.GcArray( lltype.Struct( diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -779,11 +779,12 @@ offset, itemsize, descr = self._unpack_arrayitem_raw_op(op, indexbox) try: itemvalue = value.getitem_raw(offset, itemsize, descr) - self.make_equal_to(op.result, itemvalue) except InvalidRawOperation: box = value.force_box(self) op.setarg(0, box) self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) return value.ensure_nonnull() self.emit_operation(op) @@ -805,6 +806,48 @@ value.ensure_nonnull() self.emit_operation(op) + def _unpack_raw_load_store_op(self, op, offsetbox): + offset = offsetbox.getint() + cpu = self.optimizer.cpu + descr = op.getdescr() + itemsize = cpu.unpack_arraydescr_size(descr)[1] + return offset, itemsize, descr + + def 
optimize_RAW_LOAD(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + try: + itemvalue = value.getitem_raw(offset, itemsize, descr) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) + return + value.ensure_nonnull() + self.emit_operation(op) + + def optimize_RAW_STORE(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + itemvalue = self.getvalue(op.getarg(2)) + try: + value.setitem_raw(offset, itemsize, descr, itemvalue) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + return + value.ensure_nonnull() + self.emit_operation(op) + def optimize_GETINTERIORFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): From noreply at buildbot.pypy.org Sun Mar 16 23:20:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 23:20:22 +0100 (CET) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140316222022.63E651C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69987:4428f691da85 Date: 2014-03-16 18:12 -0400 http://bitbucket.org/pypy/pypy/changeset/4428f691da85/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -105,3 +105,6 @@ .. branch: stdlib-2.7.6 Update stdlib to v2.7.6 + +.. 
branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations From noreply at buildbot.pypy.org Sun Mar 16 23:20:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 16 Mar 2014 23:20:23 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_zjit Message-ID: <20140316222023.94D751C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69988:297062a586c6 Date: 2014-03-16 18:16 -0400 http://bitbucket.org/pypy/pypy/changeset/297062a586c6/ Log: fix test_zjit diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -526,7 +526,7 @@ 'guard_class': 4, 'guard_false': 2, 'guard_no_exception': 3, - 'guard_nonnull': 8, + 'guard_nonnull': 12, 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 9, From noreply at buildbot.pypy.org Mon Mar 17 00:03:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 00:03:39 +0100 (CET) Subject: [pypy-commit] pypy default: skipped tests for alloc_raw_storage optimization Message-ID: <20140316230339.0F83B1D289D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69989:6bfb88a291c0 Date: 2014-03-16 19:02 -0400 http://bitbucket.org/pypy/pypy/changeset/6bfb88a291c0/ Log: skipped tests for alloc_raw_storage optimization diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -1,3 +1,4 @@ +import pytest from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, @@ -45,6 +46,10 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") + self.metainterp.staticdata.stats.check_resops( + {'guard_no_exception': 1, 'finish': 1}, + omit_finish=False) def test_raw_storage_float(self): def f(): @@ -58,6 +63,10 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") + self.metainterp.staticdata.stats.check_resops( + {'guard_no_exception': 1, 'finish': 1}, + omit_finish=False) def test_raw_storage_byte(self): def f(): @@ -71,6 +80,10 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") + self.metainterp.staticdata.stats.check_resops( + {'guard_no_exception': 1, 'finish': 1}, + omit_finish=False) class TestRawMem(RawMemTests, LLJitMixin): From noreply at buildbot.pypy.org Mon Mar 17 00:21:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 00:21:19 +0100 (CET) Subject: [pypy-commit] pypy default: also test alloc_raw_storage with options Message-ID: <20140316232119.423D91C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69990:c25774fb2035 Date: 2014-03-16 19:17 -0400 http://bitbucket.org/pypy/pypy/changeset/c25774fb2035/ Log: also test alloc_raw_storage with options diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- 
a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -85,6 +85,23 @@ {'guard_no_exception': 1, 'finish': 1}, omit_finish=False) + def test_raw_storage_options(self): + def f(): + p = alloc_raw_storage(15, track_allocation=False, zero=True) + raw_storage_setitem(p, 3, 24) + res = raw_storage_getitem(lltype.Signed, p, 3) + free_raw_storage(p, track_allocation=False) + return res + res = self.interp_operations(f, []) + assert res == 24 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'raw_store': 1, 'raw_load': 1, + 'finish': 1}) + pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") + self.metainterp.staticdata.stats.check_resops( + {'guard_no_exception': 1, 'finish': 1}, + omit_finish=False) + class TestRawMem(RawMemTests, LLJitMixin): From noreply at buildbot.pypy.org Mon Mar 17 00:52:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 00:52:46 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_raw_malloc_resume Message-ID: <20140316235246.17F041C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69991:ff8af8813b5e Date: 2014-03-16 19:51 -0400 http://bitbucket.org/pypy/pypy/changeset/ff8af8813b5e/ Log: fix test_raw_malloc_resume diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1150,7 +1150,8 @@ res = self.meta_interp(f, [10]) assert res == 55 self.check_trace_count(1) - self.check_resops(setarrayitem_raw=0, getarrayitem_raw=0) + self.check_resops(getarrayitem_raw=0, setarrayitem_raw=0, + raw_load=0, raw_store=0) def test_raw_malloc_resume(self): mydriver = JitDriver(greens=[], reds = 'auto') @@ -1171,8 +1172,9 @@ assert f(10) == 4000+55 res = self.meta_interp(f, [10]) assert res == 4000+55 - # the getarrayitem_raw is in the bridge - self.check_resops(getarrayitem_raw=1, setarrayitem_raw=0) + self.check_trace_count(2) + self.check_resops(getarrayitem_raw=0, setarrayitem_raw=0, + raw_load=0, raw_store=0) def test_raw_malloc_no_virtualstate(self): mydriver = JitDriver(greens=[], reds = 'auto') From noreply at buildbot.pypy.org Mon Mar 17 02:01:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 02:01:19 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140317010119.C930C1C0670@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69992:23bf2ee9e818 Date: 2014-03-16 20:43 -0400 http://bitbucket.org/pypy/pypy/changeset/23bf2ee9e818/ Log: cleanup diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -835,8 +835,8 @@ RESULT = lltype.Ptr(STRUCT) assert RESULT == op.result.concretetype return self._do_builtin_call(op, 'alloc_with_del', [], - extra = (RESULT, vtable), - extrakey = STRUCT) + extra=(RESULT, vtable), + extrakey=STRUCT) heaptracker.register_known_gctype(self.cpu, vtable, STRUCT) opname = 'new_with_vtable' else: @@ -1235,7 +1235,7 @@ op1 = self.prepare_builtin_call(op, "llong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) if %r == "TO_INT": assert op2.result.concretetype == lltype.Signed return op2 @@ -1267,7 +1267,7 @@ op1 = self.prepare_builtin_call(op, "ullong_%s", args) op2 = 
self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) return op2 ''' % (_op, _oopspec.lower(), _oopspec)).compile() From noreply at buildbot.pypy.org Mon Mar 17 02:29:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 02:29:41 +0100 (CET) Subject: [pypy-commit] pypy default: ensure the guard_no_exception is removed after making a virtual raw buffer Message-ID: <20140317012941.CA2C01C357E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69993:3dfe9fa95df6 Date: 2014-03-16 21:22 -0400 http://bitbucket.org/pypy/pypy/changeset/3dfe9fa95df6/ Log: ensure the guard_no_exception is removed after making a virtual raw buffer diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -705,6 +705,7 @@ return size = sizebox.value self.make_virtual_raw_memory(size, op.result, op) + self.last_emitted_operation = REMOVED def do_RAW_FREE(self, op): value = self.getvalue(op.getarg(1)) diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -47,9 +47,7 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") - self.metainterp.staticdata.stats.check_resops( - {'guard_no_exception': 1, 'finish': 1}, - omit_finish=False) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_float(self): def f(): @@ -64,9 +62,7 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") - self.metainterp.staticdata.stats.check_resops( - {'guard_no_exception': 1, 'finish': 1}, - omit_finish=False) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_byte(self): def f(): @@ -81,9 +77,7 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") - self.metainterp.staticdata.stats.check_resops( - {'guard_no_exception': 1, 'finish': 1}, - omit_finish=False) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_options(self): def f(): @@ -98,9 +92,7 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") - self.metainterp.staticdata.stats.check_resops( - {'guard_no_exception': 1, 'finish': 1}, - omit_finish=False) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) class TestRawMem(RawMemTests, LLJitMixin): diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1150,8 +1150,7 @@ res = self.meta_interp(f, [10]) assert res == 55 self.check_trace_count(1) - self.check_resops(getarrayitem_raw=0, setarrayitem_raw=0, - raw_load=0, raw_store=0) + self.check_resops({'guard_true': 2, 'int_add': 4, 'int_lt': 2, 'jump': 1}) def test_raw_malloc_resume(self): mydriver = JitDriver(greens=[], reds = 'auto') @@ -1173,8 +1172,9 @@ res = self.meta_interp(f, [10]) assert res == 4000+55 self.check_trace_count(2) - 
self.check_resops(getarrayitem_raw=0, setarrayitem_raw=0, - raw_load=0, raw_store=0) + self.check_resops({'guard_false': 2, 'guard_true': 5, + 'int_add': 8, 'int_gt': 3, 'int_lt': 4, 'int_mul': 2, + 'jump': 2}) def test_raw_malloc_no_virtualstate(self): mydriver = JitDriver(greens=[], reds = 'auto') From noreply at buildbot.pypy.org Mon Mar 17 03:53:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 03:53:07 +0100 (CET) Subject: [pypy-commit] pypy default: include guard_no_exception after raw malloc calls in tests Message-ID: <20140317025307.895881C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69994:299fc1410b29 Date: 2014-03-16 22:51 -0400 http://bitbucket.org/pypy/pypy/changeset/299fc1410b29/ Log: include guard_no_exception after raw malloc calls in tests diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1729,6 +1729,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr) call('free', i2, descr=raw_free_descr) @@ -1744,6 +1745,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) setarrayitem_raw(i2, 1, 123, descr=rawarraydescr_char) @@ -1756,6 +1758,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) i3 = int_add(i2, 1) setarrayitem_raw(i3, 0, 123, descr=rawarraydescr_char) @@ -1771,6 +1774,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) label('foo') # we expect the buffer to be forced *after* the label setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) # overlap! 
@@ -1781,6 +1785,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) @@ -1792,6 +1797,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) label('foo') # we expect the buffer to be forced *after* the label i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) @@ -1802,6 +1808,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) @@ -1813,6 +1820,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 1) # get a slice of the original buffer setarrayitem_raw(i3, 0, 4242, descr=rawarraydescr) # write to the slice @@ -1832,6 +1840,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] i3 = int_add(i2, 1) # get a slice of the original buffer i4 = int_add(i3, 1) # get a slice of a slice setarrayitem_raw(i4, 0, i1, descr=rawarraydescr_char) # write to the slice @@ -1849,6 +1858,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 1) # get a slice of the original buffer setarrayitem_raw(i3, 4, 4242, descr=rawarraydescr_char) # write to the slice @@ -1861,6 +1871,7 @@ label('foo') # these ops are generated by VirtualRawBufferValue._really_force i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 5) # 1+4*sizeof(char) setarrayitem_raw(i3, 0, 4242, descr=rawarraydescr_char) @@ -1878,6 +1889,7 @@ i2 = int_add(i1, 1) call('free', i0, descr=raw_free_descr) i3 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i3, 0, i2, descr=rawarraydescr) label('foo') jump(i3) @@ -1889,6 +1901,7 @@ call('free', i0, descr=raw_free_descr) label('foo') i3 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i3, 0, i2, descr=rawarraydescr) jump(i3) """ @@ -1898,6 +1911,7 @@ ops = """ [i1] i0 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] raw_store(i0, 0, i1, descr=rawarraydescr) i2 = raw_load(i0, 0, descr=rawarraydescr) i3 = int_add(i1, i2) @@ -1915,6 +1929,7 @@ ops = """ [f1] i0 = call('malloc', 16, descr=raw_malloc_descr) + guard_no_exception() [] raw_store(i0, 8, f1, descr=rawarraydescr_float) f2 = getarrayitem_raw(i0, 1, descr=rawarraydescr_float) f3 = float_add(f1, f2) From noreply at buildbot.pypy.org Mon Mar 17 04:09:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 04:09:31 +0100 (CET) Subject: [pypy-commit] pypy default: set oopspecindex for all versions of raw_malloc_varsize_char and raw_free Message-ID: <20140317030931.A18281C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69995:d026840bbc03 Date: 2014-03-16 23:06 -0400 http://bitbucket.org/pypy/pypy/changeset/d026840bbc03/ Log: set oopspecindex for all versions of raw_malloc_varsize_char and raw_free diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- 
a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -555,7 +555,7 @@ name += '_no_track_allocation' TYPE = op.args[0].value op1 = self.prepare_builtin_call(op, name, args, (TYPE,), TYPE) - if name == 'raw_malloc_varsize' and TYPE.OF == lltype.Char: + if name.startswith('raw_malloc_varsize') and TYPE.OF == lltype.Char: return self._handle_oopspec_call(op1, args, EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, EffectInfo.EF_CAN_RAISE) @@ -589,7 +589,7 @@ name += '_no_track_allocation' op1 = self.prepare_builtin_call(op, name, [op.args[0]], (STRUCT,), STRUCT) - if name == 'raw_free': + if name.startswith('raw_free'): return self._handle_oopspec_call(op1, [op.args[0]], EffectInfo.OS_RAW_FREE, EffectInfo.EF_CANNOT_RAISE) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -60,7 +60,7 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, **kwds): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None): return 'calldescr' def calldescr_canraise(self, calldescr): return True diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -1,4 +1,3 @@ -import pytest from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, @@ -46,7 +45,6 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) - pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_float(self): @@ -61,7 +59,6 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) - pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_byte(self): @@ -76,7 +73,6 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) - pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_options(self): @@ -91,7 +87,6 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) - pytest.skip("XXX alloc_raw_storage doesn't generate virtualizable raw buffer") self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) From noreply at buildbot.pypy.org Mon Mar 17 04:23:46 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 17 Mar 2014 04:23:46 +0100 (CET) Subject: [pypy-commit] pypy default: Support creating subclasses of _socket (who knows why) Message-ID: <20140317032346.BB49B1C0185@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r69996:f777f3fcf74a Date: 2014-03-16 20:21 -0700 http://bitbucket.org/pypy/pypy/changeset/f777f3fcf74a/ Log: Support creating subclasses of _socket (who knows why) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- 
a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -536,12 +536,9 @@ @unwrap_spec(family=int, type=int, proto=int) def newsocket(space, w_subtype, family=AF_INET, type=SOCK_STREAM, proto=0): - # XXX If we want to support subclassing the socket type we will need - # something along these lines. But allocate_instance is only defined - # on the standard object space, so this is not really correct. - #sock = space.allocate_instance(W_RSocket, w_subtype) - #Socket.__init__(sock, space, fd, family, type, proto) + sock = space.allocate_instance(W_RSocket, w_subtype) try: + W_RSocket.__init__(sock, family, type, proto) sock = W_RSocket(family, type, proto) except SocketError, e: raise converted_error(space, e) From noreply at buildbot.pypy.org Mon Mar 17 04:23:48 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 17 Mar 2014 04:23:48 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140317032348.4CF471C0185@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r69997:039dd41e5153 Date: 2014-03-16 20:23 -0700 http://bitbucket.org/pypy/pypy/changeset/039dd41e5153/ Log: merged upstream diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -105,3 +105,6 @@ .. branch: stdlib-2.7.6 Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -526,7 +526,7 @@ 'guard_class': 4, 'guard_false': 2, 'guard_no_exception': 3, - 'guard_nonnull': 8, + 'guard_nonnull': 12, 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 9, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -547,20 +547,18 @@ track_allocation = d.pop('track_allocation', True) if d: raise UnsupportedMallocFlags(d) - TYPE = op.args[0].value if zero: name += '_zero' if add_memory_pressure: name += '_add_memory_pressure' if not track_allocation: name += '_no_track_allocation' + TYPE = op.args[0].value op1 = self.prepare_builtin_call(op, name, args, (TYPE,), TYPE) - if name == 'raw_malloc_varsize': - ITEMTYPE = op.args[0].value.OF - if ITEMTYPE == lltype.Char: - return self._handle_oopspec_call(op1, args, - EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, - EffectInfo.EF_CAN_RAISE) + if name.startswith('raw_malloc_varsize') and TYPE.OF == lltype.Char: + return self._handle_oopspec_call(op1, args, + EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, + EffectInfo.EF_CAN_RAISE) return self.rewrite_op_direct_call(op1) def rewrite_op_malloc_varsize(self, op): @@ -591,7 +589,7 @@ name += '_no_track_allocation' op1 = self.prepare_builtin_call(op, name, [op.args[0]], (STRUCT,), STRUCT) - if name == 'raw_free': + if name.startswith('raw_free'): return self._handle_oopspec_call(op1, [op.args[0]], EffectInfo.OS_RAW_FREE, EffectInfo.EF_CANNOT_RAISE) @@ -837,8 +835,8 @@ RESULT = lltype.Ptr(STRUCT) assert RESULT == op.result.concretetype return self._do_builtin_call(op, 'alloc_with_del', [], - extra = (RESULT, vtable), - extrakey = STRUCT) + extra=(RESULT, vtable), + extrakey=STRUCT) heaptracker.register_known_gctype(self.cpu, vtable, STRUCT) opname = 'new_with_vtable' else: @@ -1237,7 +1235,7 @@ op1 = 
self.prepare_builtin_call(op, "llong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) if %r == "TO_INT": assert op2.result.concretetype == lltype.Signed return op2 @@ -1269,7 +1267,7 @@ op1 = self.prepare_builtin_call(op, "ullong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) return op2 ''' % (_op, _oopspec.lower(), _oopspec)).compile() diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -60,7 +60,7 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, **kwds): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None): return 'calldescr' def calldescr_canraise(self, calldescr): return True diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1729,6 +1729,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr) call('free', i2, descr=raw_free_descr) @@ -1744,6 +1745,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) setarrayitem_raw(i2, 1, 123, descr=rawarraydescr_char) @@ -1756,6 +1758,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) i3 = int_add(i2, 1) setarrayitem_raw(i3, 0, 123, descr=rawarraydescr_char) @@ -1771,6 +1774,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) label('foo') # we expect the buffer to be forced *after* the label setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) # overlap! 
@@ -1781,6 +1785,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) @@ -1792,6 +1797,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) label('foo') # we expect the buffer to be forced *after* the label i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) @@ -1802,6 +1808,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) @@ -1813,6 +1820,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 1) # get a slice of the original buffer setarrayitem_raw(i3, 0, 4242, descr=rawarraydescr) # write to the slice @@ -1832,6 +1840,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] i3 = int_add(i2, 1) # get a slice of the original buffer i4 = int_add(i3, 1) # get a slice of a slice setarrayitem_raw(i4, 0, i1, descr=rawarraydescr_char) # write to the slice @@ -1849,6 +1858,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 1) # get a slice of the original buffer setarrayitem_raw(i3, 4, 4242, descr=rawarraydescr_char) # write to the slice @@ -1861,6 +1871,7 @@ label('foo') # these ops are generated by VirtualRawBufferValue._really_force i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 5) # 1+4*sizeof(char) setarrayitem_raw(i3, 0, 4242, descr=rawarraydescr_char) @@ -1878,6 +1889,7 @@ i2 = int_add(i1, 1) call('free', i0, descr=raw_free_descr) i3 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i3, 0, i2, descr=rawarraydescr) label('foo') jump(i3) @@ -1889,11 +1901,48 @@ call('free', i0, descr=raw_free_descr) label('foo') i3 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i3, 0, i2, descr=rawarraydescr) jump(i3) """ self.optimize_loop(ops, expected) + def test_virtual_raw_store_raw_load(self): + ops = """ + [i1] + i0 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] + raw_store(i0, 0, i1, descr=rawarraydescr) + i2 = raw_load(i0, 0, descr=rawarraydescr) + i3 = int_add(i1, i2) + call('free', i0, descr=raw_free_descr) + jump(i3) + """ + expected = """ + [i1] + i2 = int_add(i1, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_virtual_raw_store_getarrayitem_raw(self): + ops = """ + [f1] + i0 = call('malloc', 16, descr=raw_malloc_descr) + guard_no_exception() [] + raw_store(i0, 8, f1, descr=rawarraydescr_float) + f2 = getarrayitem_raw(i0, 1, descr=rawarraydescr_float) + f3 = float_add(f1, f2) + call('free', i0, descr=raw_free_descr) + jump(f3) + """ + expected = """ + [f1] + f2 = float_add(f1, f1) + jump(f2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getfield_1(self): ops = """ [p1, p2] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py b/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py --- 
a/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py @@ -17,8 +17,7 @@ ( 4, 2, 'descr2', 'two'), ( 8, 4, 'descr3', 'three'), (12, 2, 'descr4', 'four'), - ] - # + ] def test_write_value_update(): buf = RawBuffer(FakeCPU()) @@ -28,7 +27,7 @@ assert buf._get_memory() == [ ( 0, 4, 'descr', 'ONE'), ( 4, 2, 'descr', 'two'), - ] + ] def test_write_value_invalid_length(): buf = RawBuffer(FakeCPU()) @@ -38,7 +37,6 @@ with py.test.raises(InvalidRawWrite): buf.write_value(0, 4, 'descr2', 'two') - def test_write_value_overlapping_next(): buf = RawBuffer(FakeCPU()) buf.write_value(0, 4, 'descr', 'one') diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -226,6 +226,8 @@ hints={'nolength': True})) rawarraydescr_char = cpu.arraydescrof(lltype.Array(lltype.Char, hints={'nolength': True})) + rawarraydescr_float = cpu.arraydescrof(lltype.Array(lltype.Float, + hints={'nolength': True})) fc_array = lltype.GcArray( lltype.Struct( diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -705,6 +705,7 @@ return size = sizebox.value self.make_virtual_raw_memory(size, op.result, op) + self.last_emitted_operation = REMOVED def do_RAW_FREE(self, op): value = self.getvalue(op.getarg(1)) @@ -779,11 +780,12 @@ offset, itemsize, descr = self._unpack_arrayitem_raw_op(op, indexbox) try: itemvalue = value.getitem_raw(offset, itemsize, descr) - self.make_equal_to(op.result, itemvalue) except InvalidRawOperation: box = value.force_box(self) op.setarg(0, box) self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) return value.ensure_nonnull() self.emit_operation(op) @@ -805,6 +807,48 @@ value.ensure_nonnull() self.emit_operation(op) + def _unpack_raw_load_store_op(self, op, offsetbox): + offset = offsetbox.getint() + cpu = self.optimizer.cpu + descr = op.getdescr() + itemsize = cpu.unpack_arraydescr_size(descr)[1] + return offset, itemsize, descr + + def optimize_RAW_LOAD(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + try: + itemvalue = value.getitem_raw(offset, itemsize, descr) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) + return + value.ensure_nonnull() + self.emit_operation(op) + + def optimize_RAW_STORE(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + itemvalue = self.getvalue(op.getarg(2)) + try: + value.setitem_raw(offset, itemsize, descr, itemvalue) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + return + value.ensure_nonnull() + self.emit_operation(op) + def optimize_GETINTERIORFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py 
--- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -1,7 +1,8 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, - free_raw_storage, raw_storage_getitem) + free_raw_storage, raw_storage_getitem) + class RawMemTests(object): def test_cast_void_ptr(self): @@ -44,6 +45,7 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_float(self): def f(): @@ -57,6 +59,7 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_byte(self): def f(): @@ -70,6 +73,21 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) + + def test_raw_storage_options(self): + def f(): + p = alloc_raw_storage(15, track_allocation=False, zero=True) + raw_storage_setitem(p, 3, 24) + res = raw_storage_getitem(lltype.Signed, p, 3) + free_raw_storage(p, track_allocation=False) + return res + res = self.interp_operations(f, []) + assert res == 24 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'raw_store': 1, 'raw_load': 1, + 'finish': 1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) class TestRawMem(RawMemTests, LLJitMixin): diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1150,7 +1150,7 @@ res = self.meta_interp(f, [10]) assert res == 55 self.check_trace_count(1) - self.check_resops(setarrayitem_raw=0, getarrayitem_raw=0) + self.check_resops({'guard_true': 2, 'int_add': 4, 'int_lt': 2, 'jump': 1}) def test_raw_malloc_resume(self): mydriver = JitDriver(greens=[], reds = 'auto') @@ -1171,8 +1171,10 @@ assert f(10) == 4000+55 res = self.meta_interp(f, [10]) assert res == 4000+55 - # the getarrayitem_raw is in the bridge - self.check_resops(getarrayitem_raw=1, setarrayitem_raw=0) + self.check_trace_count(2) + self.check_resops({'guard_false': 2, 'guard_true': 5, + 'int_add': 8, 'int_gt': 3, 'int_lt': 4, 'int_mul': 2, + 'jump': 2}) def test_raw_malloc_no_virtualstate(self): mydriver = JitDriver(greens=[], reds = 'auto') From noreply at buildbot.pypy.org Mon Mar 17 04:31:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 04:31:39 +0100 (CET) Subject: [pypy-commit] pypy default: fix f777f3fcf74a, add socket subclass test Message-ID: <20140317033139.6CCA41C0670@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69998:10525853b510 Date: 2014-03-16 23:30 -0400 http://bitbucket.org/pypy/pypy/changeset/10525853b510/ Log: fix f777f3fcf74a, add socket subclass test diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -539,7 +539,6 @@ sock = space.allocate_instance(W_RSocket, w_subtype) try: W_RSocket.__init__(sock, family, type, proto) - sock = W_RSocket(family, type, proto) except SocketError, e: raise 
converted_error(space, e) return space.wrap(sock) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -399,7 +399,7 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() - + def test_socket_connect_ex(self): import _socket s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) @@ -474,6 +474,13 @@ import socket s = socket.socket() + def test_subclass(self): + from _socket import socket + class MySock(socket): + blah = 123 + s = MySock() + assert s.blah == 123 + def test_getsetsockopt(self): import _socket as socket import struct From noreply at buildbot.pypy.org Mon Mar 17 04:43:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 04:43:48 +0100 (CET) Subject: [pypy-commit] pypy default: set socket buffer size so untranslated tests finish in reasonable time Message-ID: <20140317034348.E52471C11A4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69999:cc839a8262d5 Date: 2014-03-16 23:42 -0400 http://bitbucket.org/pypy/pypy/changeset/cc839a8262d5/ Log: set socket buffer size so untranslated tests finish in reasonable time diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -582,11 +582,11 @@ class AppTestSocketTCP: + HOST = 'localhost' + def setup_class(cls): cls.space = space - HOST = 'localhost' - def setup_method(self, method): w_HOST = space.wrap(self.HOST) self.w_serv = space.appexec([w_socket, w_HOST], @@ -596,6 +596,7 @@ serv.listen(1) return serv ''') + def teardown_method(self, method): if hasattr(self, 'w_serv'): space.appexec([self.w_serv], '(serv): serv.close()') @@ -616,7 +617,7 @@ raises(error, raise_error) def test_recv_send_timeout(self): - from _socket import socket, timeout + from _socket import socket, timeout, SOL_SOCKET, SO_RCVBUF, SO_SNDBUF cli = socket() cli.connect(self.serv.getsockname()) t, addr = self.serv.accept() @@ -636,6 +637,9 @@ assert count is None buf = t.recv(1) assert buf == '?' 
+ # speed up filling the buffers + t.setsockopt(SOL_SOCKET, SO_RCVBUF, 4096) + cli.setsockopt(SOL_SOCKET, SO_SNDBUF, 4096) # test send() timeout count = 0 try: @@ -663,7 +667,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes = cli.recv_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -678,7 +682,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes, addr = cli.recvfrom_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -689,6 +693,7 @@ cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) assert cli.family == socket.AF_INET + class AppTestErrno: def setup_class(cls): cls.space = space From noreply at buildbot.pypy.org Mon Mar 17 05:30:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 05:30:44 +0100 (CET) Subject: [pypy-commit] pypy default: update test_flat_setitem Message-ID: <20140317043044.690571C0185@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70000:40755c7bd64c Date: 2014-03-17 00:28 -0400 http://bitbucket.org/pypy/pypy/changeset/40755c7bd64c/ Log: update test_flat_setitem diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -480,23 +480,19 @@ def test_flat_setitem(self): result = self.run("flat_setitem") assert result == 1.0 - py.test.skip("don't run for now") self.check_trace_count(1) - # XXX not ideal, but hey, let's ignore it for now - self.check_simple_loop({'raw_load': 1, - 'raw_store': 1, - 'int_lt': 1, - 'int_gt': 1, - 'int_add': 4, - 'guard_true': 2, - 'arraylen_gc': 2, - 'jump': 1, - 'int_sub': 1, - # XXX bad part - 'int_and': 1, - 'int_mod': 1, - 'int_rshift': 1, - }) + self.check_simple_loop({ + 'call': 2, + 'getfield_gc': 2, + 'guard_no_exception': 2, + 'guard_not_invalidated': 1, + 'guard_true': 1, + 'int_gt': 1, + 'int_sub': 1, + 'jump': 1, + 'raw_load': 1, + 'raw_store': 1, + }) def define_dot(): return """ @@ -509,6 +505,7 @@ def test_dot(self): result = self.run("dot") assert result == 184 + self.check_trace_count(3) self.check_simple_loop({'float_add': 1, 'float_mul': 1, 'guard_not_invalidated': 1, From noreply at buildbot.pypy.org Mon Mar 17 05:54:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 05:54:41 +0100 (CET) Subject: [pypy-commit] pypy default: fix socket exception type for overflowing port/flowinfo Message-ID: <20140317045442.005A41C0670@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70001:13377182ed09 Date: 2014-03-17 00:53 -0400 http://bitbucket.org/pypy/pypy/changeset/13377182ed09/ Log: fix socket exception type for overflowing port/flowinfo diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -65,10 +65,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) a = addr.lock(_c.sockaddr_in6) rffi.setintfield(a, 'c_sin6_port', rsocket.htons(port)) rffi.setintfield(a, 
'c_sin6_flowinfo', rsocket.htonl(flowinfo)) @@ -97,10 +94,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: return rsocket.UNIXAddress(space.str_w(w_address)) @@ -112,10 +106,16 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): if port < 0 or port > 0xffff: - raise OperationError(space.w_ValueError, space.wrap( + raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) return rffi.cast(rffi.USHORT, port) +def make_unsigned_flowinfo(space, flowinfo): + if flowinfo < 0 or flowinfo > 0xfffff: + raise OperationError(space.w_OverflowError, space.wrap( + "flowinfo must be 0-1048575.")) + return rffi.cast(lltype.Unsigned, flowinfo) + # XXX Hack to seperate rpython and pypy def ipaddr_from_object(space, w_sockaddr): host = space.str_w(space.getitem(w_sockaddr, space.wrap(0))) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -425,8 +425,13 @@ def test_bigport(self): import _socket s = _socket.socket() - raises(ValueError, s.connect, ("localhost", 1000000)) - raises(ValueError, s.connect, ("localhost", -1)) + exc = raises(OverflowError, s.connect, ("localhost", -1)) + assert "port must be 0-65535." in str(exc.value) + exc = raises(OverflowError, s.connect, ("localhost", 1000000)) + assert "port must be 0-65535." in str(exc.value) + s = _socket.socket(_socket.AF_INET6) + exc = raises(OverflowError, s.connect, ("::1", 1234, 1048576)) + assert "flowinfo must be 0-1048575." in str(exc.value) def test_NtoH(self): import sys From noreply at buildbot.pypy.org Mon Mar 17 06:16:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 06:16:41 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_pypy_c after d026840bbc03 Message-ID: <20140317051641.86F6D1C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70002:d4e0f726ebc2 Date: 2014-03-16 22:15 -0700 http://bitbucket.org/pypy/pypy/changeset/d4e0f726ebc2/ Log: fix test_pypy_c after d026840bbc03 diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -341,7 +341,7 @@ guard_value(p166, ConstPtr(ptr72), descr=...) p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) guard_no_exception(descr=...) - i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) + i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) setfield_gc(p167, 0, descr=) setfield_gc(p167, ConstPtr(ptr86), descr=) guard_no_exception(descr=...) 
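The interp_socket.py change in r70001 above ("fix socket exception type for overflowing port/flowinfo") replaces the old ValueError with an app-level OverflowError and factors the flow-info check into a make_unsigned_flowinfo() helper. The bounds it enforces can be sketched in plain Python roughly as follows; this is an illustrative sketch only, not part of the archived changeset (the real RPython helpers raise an OperationError wrapping the space's OverflowError and cast the results to rffi.USHORT / lltype.Unsigned):

    # Rough standalone sketch of the checks added in r70001 (hypothetical
    # plain-Python version; the names mirror the RPython helpers).

    def make_ushort_port(port):
        # TCP/UDP port numbers are an unsigned 16-bit field.
        if port < 0 or port > 0xffff:
            raise OverflowError("port must be 0-65535.")
        return port

    def make_unsigned_flowinfo(flowinfo):
        # The IPv6 flowinfo argument is a 20-bit field, hence 0-1048575.
        if flowinfo < 0 or flowinfo > 0xfffff:
            raise OverflowError("flowinfo must be 0-1048575.")
        return flowinfo

    if __name__ == "__main__":
        assert make_ushort_port(80) == 80
        try:
            make_unsigned_flowinfo(1048576)  # one past 0xfffff, as in test_bigport
        except OverflowError as e:
            assert "flowinfo must be 0-1048575." in str(e)

This mirrors what the updated test_bigport test in the same changeset checks: connect(("localhost", 1000000)) and connect(("::1", 1234, 1048576)) now raise OverflowError with those messages, where the port case previously raised ValueError.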
From noreply at buildbot.pypy.org Mon Mar 17 07:52:13 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 07:52:13 +0100 (CET) Subject: [pypy-commit] pypy default: fix virtual raw malloc for all constant args Message-ID: <20140317065213.411BB1C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70003:b81700f91370 Date: 2014-03-17 02:05 -0400 http://bitbucket.org/pypy/pypy/changeset/b81700f91370/ Log: fix virtual raw malloc for all constant args diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1725,7 +1725,7 @@ # We cannot track virtuals that survive for more than two iterations. self.optimize_loop(ops, expected, preamble) - def test_virtual_raw_malloc(self): + def test_virtual_raw_malloc_basic(self): ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) @@ -1741,6 +1741,23 @@ """ self.optimize_loop(ops, expected) + def test_virtual_raw_malloc_const(self): + ops = """ + [i1] + i5 = int_mul(10, 1) + i2 = call('malloc', i5, descr=raw_malloc_descr) + guard_no_exception() [] + setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) + i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr) + call('free', i2, descr=raw_free_descr) + jump(i3) + """ + expected = """ + [i1] + jump(i1) + """ + self.optimize_loop(ops, expected) + def test_virtual_raw_malloc_force(self): ops = """ [i1] diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -699,12 +699,11 @@ self.emit_operation(op) def do_RAW_MALLOC_VARSIZE_CHAR(self, op): - sizebox = op.getarg(1) - if not isinstance(sizebox, ConstInt): + sizebox = self.get_constant_box(op.getarg(1)) + if sizebox is None: self.emit_operation(op) return - size = sizebox.value - self.make_virtual_raw_memory(size, op.result, op) + self.make_virtual_raw_memory(sizebox.getint(), op.result, op) self.last_emitted_operation = REMOVED def do_RAW_FREE(self, op): From noreply at buildbot.pypy.org Mon Mar 17 07:52:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 07:52:14 +0100 (CET) Subject: [pypy-commit] pypy default: emit_operation should handle this Message-ID: <20140317065214.AAA061C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70004:9b585f45af76 Date: 2014-03-17 02:19 -0400 http://bitbucket.org/pypy/pypy/changeset/9b585f45af76/ Log: emit_operation should handle this diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -674,11 +674,6 @@ def optimize_NEW_ARRAY(self, op): sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: - # if the original 'op' did not have a ConstInt as argument, - # build a new one with the ConstInt argument - if not isinstance(op.getarg(0), ConstInt): - op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, - descr=op.getdescr()) self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: self.getvalue(op.result).ensure_nonnull() From noreply at buildbot.pypy.org Mon Mar 17 07:52:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 07:52:15 +0100 (CET) 
Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140317065215.EDF761C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70005:0e26ea0abf69 Date: 2014-03-17 02:45 -0400 http://bitbucket.org/pypy/pypy/changeset/0e26ea0abf69/ Log: cleanup diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -605,9 +605,9 @@ i1 = ptr_eq(p0, NULL) guard_false(i1) [] i2 = ptr_ne(NULL, p0) - guard_true(i0) [] + guard_true(i2) [] i3 = ptr_eq(NULL, p0) - guard_false(i1) [] + guard_false(i3) [] guard_nonnull(p0) [] jump(p0) """ @@ -622,6 +622,30 @@ """ self.optimize_loop(ops, expected, preamble) + def test_nonnull_2(self): + ops = """ + [] + p0 = new_array(5, descr=arraydescr) # forces p0 != NULL + i0 = ptr_ne(p0, NULL) + guard_true(i0) [] + i1 = ptr_eq(p0, NULL) + guard_false(i1) [] + i2 = ptr_ne(NULL, p0) + guard_true(i2) [] + i3 = ptr_eq(NULL, p0) + guard_false(i3) [] + guard_nonnull(p0) [] + escape(p0) + jump() + """ + expected = """ + [] + p0 = new_array(5, descr=arraydescr) + escape(p0) + jump() + """ + self.optimize_loop(ops, expected) + def test_const_guard_value(self): ops = """ [] diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -676,7 +676,6 @@ if sizebox is not None: self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: - self.getvalue(op.result).ensure_nonnull() self.emit_operation(op) def optimize_CALL(self, op): From noreply at buildbot.pypy.org Mon Mar 17 10:48:27 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 10:48:27 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: start working on optimizing consecutive dict lookups Message-ID: <20140317094827.4EA6E1C35F0@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70006:f6665773ece4 Date: 2014-03-14 17:33 +0200 http://bitbucket.org/pypy/pypy/changeset/f6665773ece4/ Log: start working on optimizing consecutive dict lookups diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -21,6 +21,7 @@ OS_ARRAYCOPY = 1 # "list.ll_arraycopy" OS_STR2UNICODE = 2 # "str.str2unicode" OS_SHRINK_ARRAY = 3 # rgc.ll_shrink_array + OS_DICT_LOOKUP = 4 # ll_dict_lookup # OS_STR_CONCAT = 22 # "stroruni.concat" OS_STR_SLICE = 23 # "stroruni.slice" @@ -88,7 +89,7 @@ # for debugging: _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, - OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, + OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, OS_DICT_LOOKUP, ]) def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -403,6 +403,8 @@ prepare = self._handle_math_sqrt_call elif oopspec_name.startswith('rgc.'): prepare = self._handle_rgc_call + elif oopspec_name == 'dict.lookup': + prepare = self._handle_dict_lookup_call else: prepare = self.prepare_builtin_call try: @@ -1848,6 +1850,10 @@ return self._handle_oopspec_call(op, args, 
EffectInfo.OS_MATH_SQRT, EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + def _handle_dict_lookup_call(self, op, oopspec_name, args): + return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, + EffectInfo.EF_CAN_RAISE) + def _handle_rgc_call(self, op, oopspec_name, args): if oopspec_name == 'rgc.ll_shrink_array': return self._handle_oopspec_call(op, args, EffectInfo.OS_SHRINK_ARRAY, EffectInfo.EF_CAN_RAISE) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -1,5 +1,6 @@ import os +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS @@ -168,6 +169,7 @@ self.cached_fields = {} # cached array items: {array descr: {index: CachedField}} self.cached_arrayitems = {} + self.cached_dict_reads = {} # self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False @@ -277,6 +279,20 @@ self.force_all_lazy_setfields_and_arrayitems() self.clean_caches() + def optimize_CALL(self, op): + # dispatch based on 'oopspecindex' to a method that handles + # specifically the given oopspec call. For non-oopspec calls, + # oopspecindex is just zero. + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_DICT_LOOKUP: + if self._optimize_CALL_DICT_LOOKUP(op): + return + self.emit_operation(op) + + def _optimize_CALL_DICT_LOOKUP(self, op): + xxx + def force_from_effectinfo(self, effectinfo): # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -193,6 +193,21 @@ self.check_simple_loop({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, 'jump': 1}) + def test_dict_two_lookups(self): + driver = JitDriver(greens = [], reds = 'auto') + d = {'a': 3, 'b': 4} + indexes = ['a', 'b'] + + def f(n): + s = 0 + while n > 0: + driver.jit_merge_point() + s += d[indexes[n & 1]] + s += d[indexes[n & 1]] + n -= 1 + return s + + self.meta_interp(f, [10]) class TestLLtype(DictTests, LLJitMixin): pass diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -569,6 +569,7 @@ PERTURB_SHIFT = 5 @jit.look_inside_iff(lambda d, key, hash: jit.isvirtual(d) and jit.isconstant(key)) + at jit.oopspec('dict.lookup(d, key, hash)') def ll_dict_lookup(d, key, hash): entries = d.entries ENTRIES = lltype.typeOf(entries).TO From noreply at buildbot.pypy.org Mon Mar 17 10:48:28 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 10:48:28 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: pass the first test Message-ID: <20140317094828.811D31C35F0@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70007:ae17d4126ade Date: 2014-03-17 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/ae17d4126ade/ Log: pass the first test diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ 
b/rpython/jit/metainterp/optimizeopt/heap.py @@ -1,6 +1,7 @@ import os from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS @@ -169,6 +170,7 @@ self.cached_fields = {} # cached array items: {array descr: {index: CachedField}} self.cached_arrayitems = {} + # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} # self._lazy_setfields_and_arrayitems = [] @@ -177,9 +179,11 @@ self.postponed_op = None def force_at_end_of_preamble(self): + self.cached_dict_reads.clear() self.force_all_lazy_setfields_and_arrayitems() def flush(self): + self.cached_dict_reads.clear() self.force_all_lazy_setfields_and_arrayitems() if self.postponed_op: postponed_op = self.postponed_op @@ -208,6 +212,7 @@ del self._lazy_setfields_and_arrayitems[:] self.cached_fields.clear() self.cached_arrayitems.clear() + self.cached_dict_reads.clear() def field_cache(self, descr): try: @@ -291,7 +296,22 @@ self.emit_operation(op) def _optimize_CALL_DICT_LOOKUP(self, op): - xxx + args = self.optimizer.make_args_key(op) + descr = op.getdescr() + res_v = self.getvalue(op.result) + if descr in self.cached_dict_reads: + d = self.cached_dict_reads[descr] + else: + d = args_dict() + try: + res_v = d[args] + self.optimizer.make_equal_to(op.result, res_v, True) + res = True + except KeyError: + d[args] = res_v + res = False + self.cached_dict_reads[descr] = d + return res def force_from_effectinfo(self, effectinfo): # XXX we can get the wrong complexity here, if the lists From noreply at buildbot.pypy.org Mon Mar 17 10:55:20 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 10:55:20 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: fix guard_exception and write a failing test Message-ID: <20140317095520.84E1B1C362D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70008:0fdf94cb8da0 Date: 2014-03-17 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/0fdf94cb8da0/ Log: fix guard_exception and write a failing test diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -177,6 +177,7 @@ self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False self.postponed_op = None + self.remove_next_guard = False def force_at_end_of_preamble(self): self.cached_dict_reads.clear() @@ -306,6 +307,7 @@ try: res_v = d[args] self.optimizer.make_equal_to(op.result, res_v, True) + self.remove_next_guard = True res = True except KeyError: d[args] = res_v @@ -313,6 +315,14 @@ self.cached_dict_reads[descr] = d return res + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.remove_next_guard: + self.remove_next_guard = False + return + self.emit_operation(op) + + optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION + def force_from_effectinfo(self, effectinfo): # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5434,6 +5434,21 @@ """ self.optimize_loop(ops, expected) + def test_consecutive_getinteriorfields(self): + py.test.skip("we want this to pass") + ops = """ + [p0, i0] + i1 = getinteriorfield_gc(p0, i0, descr=valuedescr) + i2 = getinteriorfield_gc(p0, i0, descr=valuedescr) + jump(i1, i2) + """ + expected = """ + [p0, i0] + i1 = getinteriorfield_gc(p0, i0, descr=valuedescr) + jump(i1, i1) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -208,6 +208,8 @@ return s self.meta_interp(f, [10]) + self.check_simple_loop(call=1, getinteriorfield_gc=1, + guard_no_exception=1) class TestLLtype(DictTests, LLJitMixin): pass From noreply at buildbot.pypy.org Mon Mar 17 12:31:56 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 12:31:56 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: A failing test (for stupid reasons) Message-ID: <20140317113156.099261D2824@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70009:657f9e5f56a7 Date: 2014-03-17 13:31 +0200 http://bitbucket.org/pypy/pypy/changeset/657f9e5f56a7/ Log: A failing test (for stupid reasons) diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -208,6 +208,27 @@ return s self.meta_interp(f, [10]) + # XXX should be one getinteriorfield_gc + self.check_simple_loop(call=1, getinteriorfield_gc=2, + guard_no_exception=1) + + def test_dict_insert_invalidates_caches(self): + driver = JitDriver(greens = [], reds = 'auto') + d = {'a': 3, 'b': 4} + indexes = ['a', 'b'] + + def f(n): + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + d['aa'] = 13 # this will invalidate the index + s += d[index] + n -= 1 + return s + + self.meta_interp(f, [10]) self.check_simple_loop(call=1, getinteriorfield_gc=1, guard_no_exception=1) From noreply at buildbot.pypy.org Mon Mar 17 13:35:52 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 13:35:52 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: fix and write more tests Message-ID: <20140317123552.F04F81C357E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70010:b828e827b47f Date: 2014-03-17 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/b828e827b47f/ Log: fix and write more tests diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -178,7 +178,7 @@ return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescr=None): """Return the calldescr that describes all calls done by 'op'. This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. 
It gets an effectinfo @@ -259,6 +259,7 @@ effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, extraeffect, oopspecindex, can_invalidate, call_release_gil_target, + extradescr, ) # assert effectinfo is not None diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -97,7 +97,8 @@ extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extradescr=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), @@ -133,6 +134,7 @@ result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex + result.extradescr = extradescr result.call_release_gil_target = call_release_gil_target if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE @@ -173,7 +175,8 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extradescr=None): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None @@ -222,7 +225,8 @@ extraeffect, oopspecindex, can_invalidate, - call_release_gil_target) + call_release_gil_target, + extradescr) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1684,9 +1684,11 @@ # ---------- # Strings and Unicodes. 
- def _handle_oopspec_call(self, op, args, oopspecindex, extraeffect=None): + def _handle_oopspec_call(self, op, args, oopspecindex, extraeffect=None, + extradescr=None): calldescr = self.callcontrol.getcalldescr(op, oopspecindex, - extraeffect) + extraeffect, + extradescr=extradescr) if extraeffect is not None: assert (is_test_calldescr(calldescr) # for tests or calldescr.get_extra_info().extraeffect == extraeffect) @@ -1851,8 +1853,11 @@ EffectInfo.EF_ELIDABLE_CANNOT_RAISE) def _handle_dict_lookup_call(self, op, oopspec_name, args): + extradescr = self.cpu.fielddescrof(op.args[1].concretetype.TO, + 'entries') return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, - EffectInfo.EF_CAN_RAISE) + EffectInfo.EF_CAN_RAISE, + extradescr=extradescr) def _handle_rgc_call(self, op, oopspec_name, args): if oopspec_name == 'rgc.ll_shrink_array': diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -298,7 +298,7 @@ def _optimize_CALL_DICT_LOOKUP(self, op): args = self.optimizer.make_args_key(op) - descr = op.getdescr() + descr = op.getdescr().extrainfo.extradescr res_v = self.getvalue(op.result) if descr in self.cached_dict_reads: d = self.cached_dict_reads[descr] @@ -331,6 +331,10 @@ for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: + try: + del self.cached_dict_reads[fielddescr] + except KeyError: + pass self.force_lazy_setfield(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -214,23 +214,42 @@ def test_dict_insert_invalidates_caches(self): driver = JitDriver(greens = [], reds = 'auto') - d = {'a': 3, 'b': 4} - indexes = ['a', 'b'] + indexes = ['aa', 'b', 'cc'] def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} s = 0 while n > 0: driver.jit_merge_point() index = indexes[n & 1] s += d[index] - d['aa'] = 13 # this will invalidate the index + d['aa'] += 1 # this will invalidate the index s += d[index] n -= 1 return s - self.meta_interp(f, [10]) - self.check_simple_loop(call=1, getinteriorfield_gc=1, - guard_no_exception=1) + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_simple_loop(call=5) + + def test_dict_double_lookup_2(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + d[index] += 1 + n -= 1 + return s + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_simple_loop(call=3) class TestLLtype(DictTests, LLJitMixin): pass From noreply at buildbot.pypy.org Mon Mar 17 13:38:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 13:38:37 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_pypy_c/test_ffi.py Message-ID: <20140317123837.698131C357E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70011:3da8cfa97734 Date: 2014-03-17 08:37 -0400 http://bitbucket.org/pypy/pypy/changeset/3da8cfa97734/ Log: fix test_pypy_c/test_ffi.py diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py 
b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -342,12 +342,10 @@ p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) guard_no_exception(descr=...) i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) - setfield_gc(p167, 0, descr=) - setfield_gc(p167, ConstPtr(ptr86), descr=) - guard_no_exception(descr=...) i169 = int_add(i168, i97) i170 = int_sub(i160, i106) setfield_gc(p167, i168, descr=) + setfield_gc(p167, ConstPtr(null), descr=) setfield_gc(p167, ConstPtr(ptr89), descr=) i171 = uint_gt(i170, i108) guard_false(i171, descr=...) From noreply at buildbot.pypy.org Mon Mar 17 14:11:35 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 14:11:35 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: fix for ordereddicts too Message-ID: <20140317131135.11DF81C11A4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70012:cc34a5b32a04 Date: 2014-03-17 15:10 +0200 http://bitbucket.org/pypy/pypy/changeset/cc34a5b32a04/ Log: fix for ordereddicts too diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -403,7 +403,8 @@ prepare = self._handle_math_sqrt_call elif oopspec_name.startswith('rgc.'): prepare = self._handle_rgc_call - elif oopspec_name == 'dict.lookup': + elif oopspec_name.endswith('dict.lookup'): + # also ordereddict.lookup prepare = self._handle_dict_lookup_call else: prepare = self.prepare_builtin_call diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -212,6 +212,28 @@ self.check_simple_loop(call=1, getinteriorfield_gc=2, guard_no_exception=1) + + def test_ordered_dict_two_lookups(self): + driver = JitDriver(greens = [], reds = 'auto') + d = OrderedDict() + d['a'] = 3 + d['b'] = 4 + indexes = ['a', 'b'] + + def f(n): + s = 0 + while n > 0: + driver.jit_merge_point() + s += d[indexes[n & 1]] + s += d[indexes[n & 1]] + n -= 1 + return s + + self.meta_interp(f, [10]) + # XXX should be one getinteriorfield_gc + self.check_simple_loop(call=1, getinteriorfield_gc=2, + guard_no_exception=1) + def test_dict_insert_invalidates_caches(self): driver = JitDriver(greens = [], reds = 'auto') indexes = ['aa', 'b', 'cc'] diff --git a/rpython/rtyper/lltypesystem/rbuilder.py b/rpython/rtyper/lltypesystem/rbuilder.py --- a/rpython/rtyper/lltypesystem/rbuilder.py +++ b/rpython/rtyper/lltypesystem/rbuilder.py @@ -85,6 +85,7 @@ ll_builder.used = needed @staticmethod + @enforceargs(None, lltype.Char) def ll_append_char(ll_builder, char): if ll_builder.used == ll_builder.allocated: ll_builder.grow(ll_builder, 1) diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -709,6 +709,7 @@ @jit.look_inside_iff(lambda d, key, hash, store_flag, T: jit.isvirtual(d) and jit.isconstant(key)) + at jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)') def ll_dict_lookup(d, key, hash, store_flag, T): INDEXES = _ll_ptr_to_array_of(T) entries = d.entries From noreply at buildbot.pypy.org Mon Mar 17 14:27:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 14:27:23 
+0100 (CET) Subject: [pypy-commit] pypy default: virtual raw buffers force using raw_store rather than int_add + setarrayitem_raw Message-ID: <20140317132723.1AEA21C362D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70013:5da6957bd86b Date: 2014-03-17 09:20 -0400 http://bitbucket.org/pypy/pypy/changeset/5da6957bd86b/ Log: virtual raw buffers force using raw_store rather than int_add + setarrayitem_raw diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1785,11 +1785,12 @@ def test_virtual_raw_malloc_force(self): ops = """ [i1] - i2 = call('malloc', 10, descr=raw_malloc_descr) + i2 = call('malloc', 20, descr=raw_malloc_descr) guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) setarrayitem_raw(i2, 1, 123, descr=rawarraydescr_char) + setarrayitem_raw(i2, 1, 789, descr=rawarraydescr_float) label('foo') # we expect the buffer to be forced *after* the label escape(i2) call('free', i2, descr=raw_free_descr) @@ -1798,13 +1799,12 @@ expected = """ [i1] label('foo') - i2 = call('malloc', 10, descr=raw_malloc_descr) + i2 = call('malloc', 20, descr=raw_malloc_descr) guard_no_exception() [] - setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) - i3 = int_add(i2, 1) - setarrayitem_raw(i3, 0, 123, descr=rawarraydescr_char) - i4 = int_add(i2, 2) - setarrayitem_raw(i4, 0, 456, descr=rawarraydescr_char) + raw_store(i2, 0, i1, descr=rawarraydescr_char) + raw_store(i2, 1, 123, descr=rawarraydescr_char) + raw_store(i2, 2, 456, descr=rawarraydescr_char) + raw_store(i2, 8, 789, descr=rawarraydescr_float) escape(i2) call('free', i2, descr=raw_free_descr) jump(i1) @@ -1827,7 +1827,7 @@ label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) guard_no_exception() [] - setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) + raw_store(i2, 0, i1, descr=rawarraydescr) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) jump(i1) @@ -1850,7 +1850,7 @@ label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) guard_no_exception() [] - setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) + raw_store(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) jump(i1) @@ -1913,9 +1913,8 @@ # these ops are generated by VirtualRawBufferValue._really_force i2 = call('malloc', 10, descr=raw_malloc_descr) guard_no_exception() [] - setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) - i3 = int_add(i2, 5) # 1+4*sizeof(char) - setarrayitem_raw(i3, 0, 4242, descr=rawarraydescr_char) + raw_store(i2, 0, 42, descr=rawarraydescr_char) + raw_store(i2, 5, 4242, descr=rawarraydescr_char) # this is generated by VirtualRawSliceValue._really_force i4 = int_add(i2, 1) escape(i4) @@ -1943,7 +1942,7 @@ label('foo') i3 = call('malloc', 10, descr=raw_malloc_descr) guard_no_exception() [] - setarrayitem_raw(i3, 0, i2, descr=rawarraydescr) + raw_store(i3, 0, i2, descr=rawarraydescr) jump(i3) """ self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -432,22 +432,13 @@ optforce.emit_operation(self.source_op) self.box = 
self.source_op.result for i in range(len(self.buffer.offsets)): - # get a pointer to self.box+offset + # write the value offset = self.buffer.offsets[i] - if offset == 0: - arraybox = self.box - else: - arraybox = BoxInt() - op = ResOperation(rop.INT_ADD, - [self.box, ConstInt(offset)], arraybox) - optforce.emit_operation(op) - # - # write the value descr = self.buffer.descrs[i] itemvalue = self.buffer.values[i] itembox = itemvalue.force_box(optforce) - op = ResOperation(rop.SETARRAYITEM_RAW, - [arraybox, ConstInt(0), itembox], None, + op = ResOperation(rop.RAW_STORE, + [self.box, ConstInt(offset), itembox], None, descr=descr) optforce.emit_operation(op) diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1196,7 +1196,7 @@ assert res == 45 # make sure that the raw buffer is *not* virtualized because we do not # support virtualstate - self.check_resops(getarrayitem_raw=2, setarrayitem_raw=2) + self.check_resops(getarrayitem_raw=2, raw_store=2) def test_raw_malloc_only_chars(self): mydriver = JitDriver(greens=[], reds = 'auto') From noreply at buildbot.pypy.org Mon Mar 17 14:31:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 14:31:55 +0100 (CET) Subject: [pypy-commit] pypy default: passing test for virtualization of setarrayitem_raw followed by raw_load Message-ID: <20140317133155.CB8821C362D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70014:2b667d34e6cd Date: 2014-03-17 09:30 -0400 http://bitbucket.org/pypy/pypy/changeset/2b667d34e6cd/ Log: passing test for virtualization of setarrayitem_raw followed by raw_load diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1983,6 +1983,24 @@ """ self.optimize_loop(ops, expected) + def test_virtual_setarrayitem_raw_raw_load(self): + ops = """ + [f1] + i0 = call('malloc', 16, descr=raw_malloc_descr) + guard_no_exception() [] + setarrayitem_raw(i0, 1, f1, descr=rawarraydescr_float) + f2 = raw_load(i0, 8, descr=rawarraydescr_float) + f3 = float_add(f1, f2) + call('free', i0, descr=raw_free_descr) + jump(f3) + """ + expected = """ + [f1] + f2 = float_add(f1, f1) + jump(f2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getfield_1(self): ops = """ [p1, p2] From noreply at buildbot.pypy.org Mon Mar 17 15:03:58 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 15:03:58 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: fixes Message-ID: <20140317140358.6E4591C11A4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70015:b6d875f4bbaf Date: 2014-03-17 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/b6d875f4bbaf/ Log: fixes diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -298,7 +298,7 @@ def _optimize_CALL_DICT_LOOKUP(self, op): args = self.optimizer.make_args_key(op) - descr = op.getdescr().extrainfo.extradescr + descr = op.getdescr().get_extra_info().extradescr res_v = self.getvalue(op.result) if descr in self.cached_dict_reads: d = self.cached_dict_reads[descr] @@ 
-306,7 +306,7 @@ d = args_dict() try: res_v = d[args] - self.optimizer.make_equal_to(op.result, res_v, True) + self.make_equal_to(op.result, res_v) self.remove_next_guard = True res = True except KeyError: diff --git a/rpython/rtyper/lltypesystem/rbuilder.py b/rpython/rtyper/lltypesystem/rbuilder.py --- a/rpython/rtyper/lltypesystem/rbuilder.py +++ b/rpython/rtyper/lltypesystem/rbuilder.py @@ -85,14 +85,6 @@ ll_builder.used = needed @staticmethod - @enforceargs(None, lltype.Char) - def ll_append_char(ll_builder, char): - if ll_builder.used == ll_builder.allocated: - ll_builder.grow(ll_builder, 1) - ll_builder.buf.chars[ll_builder.used] = char - ll_builder.used += 1 - - @staticmethod def ll_append_slice(ll_builder, ll_str, start, end): needed = end - start used = ll_builder.used @@ -114,6 +106,7 @@ ll_builder.used = used @staticmethod + @enforceargs(None, None, int) def ll_append_charpsize(ll_builder, charp, size): used = ll_builder.used if used + size > ll_builder.allocated: @@ -139,6 +132,14 @@ return ll_builder != nullptr(cls.lowleveltype.TO) class StringBuilderRepr(BaseStringBuilderRepr): + @staticmethod + @enforceargs(None, lltype.Char) + def ll_append_char(ll_builder, char): + if ll_builder.used == ll_builder.allocated: + ll_builder.grow(ll_builder, 1) + ll_builder.buf.chars[ll_builder.used] = char + ll_builder.used += 1 + lowleveltype = lltype.Ptr(STRINGBUILDER) basetp = STR mallocfn = staticmethod(rstr.mallocstr) @@ -149,6 +150,15 @@ ) class UnicodeBuilderRepr(BaseStringBuilderRepr): + + @staticmethod + @enforceargs(None, lltype.UniChar) + def ll_append_char(ll_builder, char): + if ll_builder.used == ll_builder.allocated: + ll_builder.grow(ll_builder, 1) + ll_builder.buf.chars[ll_builder.used] = char + ll_builder.used += 1 + lowleveltype = lltype.Ptr(UNICODEBUILDER) basetp = UNICODE mallocfn = staticmethod(rstr.mallocunicode) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -4,7 +4,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import ll_assert from rpython.rlib.objectmodel import (malloc_zero_filled, we_are_translated, - _hash_string, keepalive_until_here, specialize) + _hash_string, keepalive_until_here, specialize, enforceargs) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError @@ -108,6 +108,7 @@ copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) @jit.dont_look_inside + @enforceargs(None, None, None, int) def copy_raw_to_string(ptrsrc, dst, dststart, length): # xxx Warning: same note as above apply: don't do this at home assert length >= 0 From noreply at buildbot.pypy.org Mon Mar 17 15:51:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 15:51:07 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup repr of resops with descr but no args Message-ID: <20140317145107.D81AA1D27BF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70016:084cf842f3c1 Date: 2014-03-17 10:49 -0400 http://bitbucket.org/pypy/pypy/changeset/084cf842f3c1/ Log: cleanup repr of resops with descr but no args diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -119,10 +119,11 @@ descr = self.getdescr() if descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, 
sres, self.getopname(), - ', '.join([str(a) for a in args])) + ', '.join([str(a) for a in args])) else: - return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in args]), descr) + return '%s%s%s(%s)' % (prefix, sres, self.getopname(), + ', '.join([str(a) for a in args] + + ['descr=%r' % descr])) def getopname(self): try: diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -1,6 +1,7 @@ import py +import re from rpython.jit.metainterp import resoperation as rop -from rpython.jit.metainterp.history import AbstractDescr +from rpython.jit.metainterp.history import AbstractDescr, AbstractFailDescr def test_arity_mixins(): cases = [ @@ -55,12 +56,18 @@ op = rop.ResOperation(rop.rop.INT_ADD, ['a', 'b'], 'c') assert op.getarglist() == ['a', 'b'] assert op.result == 'c' + assert repr(op) == "c = int_add(a, b)" mydescr = AbstractDescr() op = rop.ResOperation(rop.rop.CALL, ['a', 'b'], 'c', descr=mydescr) assert op.getarglist() == ['a', 'b'] assert op.result == 'c' assert op.getdescr() is mydescr + assert re.match("c = call\(a, b, descr=<.+>\)$", repr(op)) + + mydescr = AbstractFailDescr() + op = rop.ResOperation(rop.rop.GUARD_NO_EXCEPTION, [], None, descr=mydescr) + assert re.match("guard_no_exception\(descr=<.+>\)$", repr(op)) def test_can_malloc(): mydescr = AbstractDescr() From noreply at buildbot.pypy.org Mon Mar 17 15:52:14 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 15:52:14 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: maybe fix translation Message-ID: <20140317145214.65AA31D27BF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70017:7bef0e7352f9 Date: 2014-03-17 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/7bef0e7352f9/ Log: maybe fix translation diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -1,7 +1,7 @@ import os from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.metainterp.optimizeopt.util import args_dict +from rpython.jit.metainterp.optimizeopt.util import args_dict_value from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS @@ -303,7 +303,7 @@ if descr in self.cached_dict_reads: d = self.cached_dict_reads[descr] else: - d = args_dict() + d = args_dict_value() try: res_v = d[args] self.make_equal_to(op.result, res_v) diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -92,6 +92,7 @@ # ____________________________________________________________ +def _new_args_dict(): def args_eq(args1, args2): make_sure_not_resized(args1) make_sure_not_resized(args2) @@ -124,6 +125,9 @@ def args_dict_box(): return r_dict(args_eq, args_hash) +def args_dict_value(): + return r_dict(args_eq, args_hash) + # ____________________________________________________________ diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ 
b/rpython/rtyper/lltypesystem/rstr.py @@ -108,7 +108,6 @@ copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) @jit.dont_look_inside - @enforceargs(None, None, None, int) def copy_raw_to_string(ptrsrc, dst, dststart, length): # xxx Warning: same note as above apply: don't do this at home assert length >= 0 @@ -120,9 +119,10 @@ llmemory.raw_memcopy(srcbuf, dst, llmemory.sizeof(CHAR_TP) * length) # end of "no GC" section keepalive_until_here(dst) - copy_raw_to_string._always_inline_ = True copy_raw_to_string = func_with_new_name(copy_raw_to_string, 'copy_raw_to_%s' % name) + copy_raw_to_string._always_inline_ = True + copy_raw_to_string._annenforceargs_ = (None, None, int, int) return copy_string_to_raw, copy_raw_to_string, copy_string_contents From noreply at buildbot.pypy.org Mon Mar 17 16:01:22 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 17 Mar 2014 16:01:22 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: ups Message-ID: <20140317150122.09B681D27C0@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70018:eef1a9f78543 Date: 2014-03-17 17:00 +0200 http://bitbucket.org/pypy/pypy/changeset/eef1a9f78543/ Log: ups diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -92,7 +92,6 @@ # ____________________________________________________________ -def _new_args_dict(): def args_eq(args1, args2): make_sure_not_resized(args1) make_sure_not_resized(args2) From noreply at buildbot.pypy.org Mon Mar 17 16:27:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 16:27:58 +0100 (CET) Subject: [pypy-commit] pypy default: unskip this test Message-ID: <20140317152758.8F4BA1C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70019:10739383a275 Date: 2014-03-17 11:26 -0400 http://bitbucket.org/pypy/pypy/changeset/10739383a275/ Log: unskip this test diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2872,7 +2872,6 @@ py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) def test_sig_bug(self): - py.test.skip("_annenforceargs_ does not work for default arguments") def g(x, y=5): return y == 5 g._annenforceargs_ = (int, int) @@ -2880,7 +2879,8 @@ return g(x) a = self.RPythonAnnotator() s = a.build_types(fun, [int]) - assert not s.is_constant() + assert s.knowntype is bool + assert s.is_constant() def test_sig_list(self): def g(buf): From noreply at buildbot.pypy.org Mon Mar 17 19:23:21 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 17 Mar 2014 19:23:21 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: I think this expresses the logic much more clearly Message-ID: <20140317182321.551661C12F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: improve-consecutive-dict-lookups Changeset: r70020:325e14172b33 Date: 2014-03-17 19:22 +0100 http://bitbucket.org/pypy/pypy/changeset/325e14172b33/ Log: I think this expresses the logic much more clearly diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -299,20 +299,20 @@ def 
_optimize_CALL_DICT_LOOKUP(self, op): args = self.optimizer.make_args_key(op) descr = op.getdescr().get_extra_info().extradescr - res_v = self.getvalue(op.result) if descr in self.cached_dict_reads: d = self.cached_dict_reads[descr] else: d = args_dict_value() + self.cached_dict_reads[descr] = d try: res_v = d[args] + except KeyError: + d[args] = self.getvalue(op.result) + res = False + else: self.make_equal_to(op.result, res_v) self.remove_next_guard = True res = True - except KeyError: - d[args] = res_v - res = False - self.cached_dict_reads[descr] = d return res def optimize_GUARD_NO_EXCEPTION(self, op): From noreply at buildbot.pypy.org Mon Mar 17 21:07:53 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 17 Mar 2014 21:07:53 +0100 (CET) Subject: [pypy-commit] pypy default: win32 fix Message-ID: <20140317200753.596791C00B9@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70021:a3b81c345e03 Date: 2014-03-17 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/a3b81c345e03/ Log: win32 fix diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -9,8 +9,12 @@ class AppTestBufferTooShort: spaceconfig = {'usemodules': ['_multiprocessing', 'thread', 'signal', - 'itertools', 'select', 'fcntl', 'struct', - 'binascii']} + 'itertools', 'select', 'struct', 'binascii']} + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') + def setup_class(cls): if cls.runappdirect: @@ -100,9 +104,12 @@ spaceconfig = { "usemodules": [ '_multiprocessing', 'thread', 'signal', 'struct', 'array', - 'itertools', '_socket', 'binascii', 'select', 'fcntl', - ] + 'itertools', '_socket', 'binascii', 'select' ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') def setup_class(cls): cls.w_connections = cls.space.newlist([]) From noreply at buildbot.pypy.org Mon Mar 17 21:07:54 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 17 Mar 2014 21:07:54 +0100 (CET) Subject: [pypy-commit] pypy default: win32 fix Message-ID: <20140317200754.847371C00B9@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70022:2057b7ec2690 Date: 2014-03-17 22:06 +0200 http://bitbucket.org/pypy/pypy/changeset/2057b7ec2690/ Log: win32 fix diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -79,6 +79,8 @@ 'itertools', '_socket', 'binascii', ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') def setup_class(cls): if sys.platform != "win32": @@ -90,7 +92,6 @@ # just for multiprocessing to import correctly on Windows w_modules = space.sys.get('modules') space.setitem(w_modules, space.wrap('msvcrt'), space.sys) - space.setitem(w_modules, space.wrap('_subprocess'), space.sys) else: import _multiprocessing From noreply at buildbot.pypy.org Mon Mar 17 21:19:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 21:19:20 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140317201920.3CA721C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70023:c3ed9ebe70f4 Date: 2014-03-17 11:56 -0400 
http://bitbucket.org/pypy/pypy/changeset/c3ed9ebe70f4/ Log: cleanups diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1661,7 +1661,7 @@ self.optimize_loop(ops, ops) def test_setfield_int_eq_result(self): - # test that the setfield_gc does not end up before int_eq + # test that the setfield_gc does not end up before int_eq ops = """ [p1, i1, i2] i3 = int_eq(i1, i2) @@ -5443,6 +5443,7 @@ jump(i0) """ self.optimize_loop(ops, expected) - + + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -1,7 +1,7 @@ import itertools import py -from rpython.rlib.objectmodel import r_dict, compute_identity_hash +from rpython.rlib.objectmodel import r_dict, compute_identity_hash, specialize from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.jit.metainterp import resoperation @@ -118,13 +118,10 @@ res = intmask((1000003 * res) ^ y) return res + at specialize.call_location() def args_dict(): return r_dict(args_eq, args_hash) -def args_dict_box(): - return r_dict(args_eq, args_hash) - - # ____________________________________________________________ def equaloplists(oplist1, oplist2, strict_fail_args=True, remap={}, @@ -183,4 +180,3 @@ assert False assert len(oplist1) == len(oplist2) return True - diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -648,7 +648,6 @@ def optimize_SETFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) - if value.is_virtual(): fieldvalue = self.getvalue(op.getarg(1)) value.setfield(op.getdescr(), fieldvalue) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -11,7 +11,7 @@ ConstFloat, Box, TargetToken) from rpython.jit.metainterp.jitprof import EmptyProfiler from rpython.jit.metainterp.logger import Logger -from rpython.jit.metainterp.optimizeopt.util import args_dict_box +from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.resoperation import rop from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, debug_print, make_sure_not_resized @@ -1656,7 +1656,7 @@ self.forced_virtualizable = None self.partial_trace = None self.retracing_from = -1 - self.call_pure_results = args_dict_box() + self.call_pure_results = args_dict() self.heapcache = HeapCache() self.call_ids = [] @@ -1784,16 +1784,14 @@ moreargs = [box] + extraargs else: moreargs = list(extraargs) - metainterp_sd = self.staticdata if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: - resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, + resumedescr = compile.ResumeGuardForcedDescr(self.staticdata, self.jitdriver_sd) elif opnum == rop.GUARD_NOT_INVALIDATED: resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() - guard_op = self.history.record(opnum, moreargs, None, - descr=resumedescr) + guard_op = 
self.history.record(opnum, moreargs, None, descr=resumedescr) self.capture_resumedata(resumedescr, resumepc) self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count From noreply at buildbot.pypy.org Mon Mar 17 21:19:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 21:19:21 +0100 (CET) Subject: [pypy-commit] pypy default: guard_no_exception isn't currently produced here Message-ID: <20140317201921.876771C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70024:256ecf7cf106 Date: 2014-03-17 14:57 -0400 http://bitbucket.org/pypy/pypy/changeset/256ecf7cf106/ Log: guard_no_exception isn't currently produced here diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1800,7 +1800,7 @@ [i1] label('foo') i2 = call('malloc', 20, descr=raw_malloc_descr) - guard_no_exception() [] + #guard_no_exception() [] # XXX should appear raw_store(i2, 0, i1, descr=rawarraydescr_char) raw_store(i2, 1, 123, descr=rawarraydescr_char) raw_store(i2, 2, 456, descr=rawarraydescr_char) @@ -1826,7 +1826,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) - guard_no_exception() [] + #guard_no_exception() [] # XXX should appear raw_store(i2, 0, i1, descr=rawarraydescr) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) @@ -1849,7 +1849,7 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) - guard_no_exception() [] + #guard_no_exception() [] # XXX should appear raw_store(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) @@ -1912,7 +1912,7 @@ label('foo') # these ops are generated by VirtualRawBufferValue._really_force i2 = call('malloc', 10, descr=raw_malloc_descr) - guard_no_exception() [] + #guard_no_exception() [] # XXX should appear raw_store(i2, 0, 42, descr=rawarraydescr_char) raw_store(i2, 5, 4242, descr=rawarraydescr_char) # this is generated by VirtualRawSliceValue._really_force @@ -1941,7 +1941,7 @@ call('free', i0, descr=raw_free_descr) label('foo') i3 = call('malloc', 10, descr=raw_malloc_descr) - guard_no_exception() [] + #guard_no_exception() [] # XXX should appear raw_store(i3, 0, i2, descr=rawarraydescr) jump(i3) """ From noreply at buildbot.pypy.org Mon Mar 17 21:19:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 21:19:22 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140317201922.B44CB1C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70025:d3abcc897136 Date: 2014-03-17 16:18 -0400 http://bitbucket.org/pypy/pypy/changeset/d3abcc897136/ Log: merge heads diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -9,8 +9,12 @@ class AppTestBufferTooShort: spaceconfig = {'usemodules': ['_multiprocessing', 'thread', 'signal', - 'itertools', 'select', 'fcntl', 'struct', - 'binascii']} + 'itertools', 'select', 'struct', 'binascii']} + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') + def setup_class(cls): if cls.runappdirect: @@ -75,6 +79,8 @@ 
'itertools', '_socket', 'binascii', ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') def setup_class(cls): if sys.platform != "win32": @@ -86,7 +92,6 @@ # just for multiprocessing to import correctly on Windows w_modules = space.sys.get('modules') space.setitem(w_modules, space.wrap('msvcrt'), space.sys) - space.setitem(w_modules, space.wrap('_subprocess'), space.sys) else: import _multiprocessing @@ -100,9 +105,12 @@ spaceconfig = { "usemodules": [ '_multiprocessing', 'thread', 'signal', 'struct', 'array', - 'itertools', '_socket', 'binascii', 'select', 'fcntl', - ] + 'itertools', '_socket', 'binascii', 'select' ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') def setup_class(cls): cls.w_connections = cls.space.newlist([]) From noreply at buildbot.pypy.org Mon Mar 17 21:36:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 17 Mar 2014 21:36:19 +0100 (CET) Subject: [pypy-commit] pypy py3k: update comment Message-ID: <20140317203619.D1ADE1C00B9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70026:523ca391770e Date: 2014-03-17 13:31 -0700 http://bitbucket.org/pypy/pypy/changeset/523ca391770e/ Log: update comment diff --git a/lib-python/3/test/test_aifc.py b/lib-python/3/test/test_aifc.py --- a/lib-python/3/test/test_aifc.py +++ b/lib-python/3/test/test_aifc.py @@ -72,7 +72,7 @@ self.assertEqual(f.getparams(), fout.getparams()) self.assertEqual(f.readframes(5), fout.readframes(5)) - @impl_detail("PyPy has no audioop module yet", pypy=False) + @impl_detail("PyPy has no audioop.lin2ulaw yet", pypy=False) def test_compress(self): f = self.f = aifc.open(self.sndfilepath) fout = self.fout = aifc.open(TESTFN, 'wb') From noreply at buildbot.pypy.org Mon Mar 17 21:36:21 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 17 Mar 2014 21:36:21 +0100 (CET) Subject: [pypy-commit] pypy stdlib-3.2.5: add missing test entries Message-ID: <20140317203621.38F471C00B9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-3.2.5 Changeset: r70027:9bce0cf32d0f Date: 2014-03-17 13:34 -0700 http://bitbucket.org/pypy/pypy/changeset/9bce0cf32d0f/ Log: add missing test entries diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -98,7 +98,7 @@ RegrTest('test___all__.py', core=True), RegrTest('test___future__.py', core=True), RegrTest('test__locale.py', usemodules='_locale'), - #RegrTest('test__osx_support.py'), + RegrTest('test__osx_support.py'), RegrTest('test_abc.py'), RegrTest('test_abstract_numbers.py'), RegrTest('test_aifc.py'), @@ -199,6 +199,7 @@ RegrTest('test_extcall.py', core=True), RegrTest('test_fcntl.py', usemodules='fcntl'), RegrTest('test_file.py', usemodules="posix", core=True), + RegrTest('test_file_eintr.py'), RegrTest('test_filecmp.py', core=True), RegrTest('test_fileinput.py', core=True), RegrTest('test_fileio.py'), @@ -404,7 +405,7 @@ RegrTest('test_timeout.py'), RegrTest('test_tk.py'), RegrTest('test_tokenize.py'), - #RegrTest('test_tools.py'), + RegrTest('test_tools.py'), RegrTest('test_trace.py'), RegrTest('test_traceback.py', core=True), RegrTest('test_ttk_guionly.py'), From noreply at buildbot.pypy.org Mon Mar 17 21:39:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 21:39:51 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: merge default Message-ID: 
<20140317203951.3DCD01C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: improve-consecutive-dict-lookups Changeset: r70028:44f6695da32e Date: 2014-03-17 14:58 -0400 http://bitbucket.org/pypy/pypy/changeset/44f6695da32e/ Log: merge default diff too long, truncating to 2000 out of 2035 lines diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -51,16 +51,22 @@ self.config = config self.logfile = logfile # preferably line buffered - def write_log_entry(self, testpath, lettercode, longrepr): + def write_log_entry(self, testpath, lettercode, longrepr, sections=[]): py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) + for key, text in sections: + py.builtin.print_(" ", file=self.logfile) + py.builtin.print_(" -------------------- %s --------------------" + % key.rstrip(), file=self.logfile) + py.builtin.print_(" %s" % (text.rstrip().replace('\n', '\n '),), + file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) if testpath is None: testpath = report.fspath - self.write_log_entry(testpath, lettercode, longrepr) + self.write_log_entry(testpath, lettercode, longrepr, report.sections) def pytest_runtest_logreport(self, report): if report.when != "call" and report.passed: diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -38,6 +38,7 @@ if sys.version_info[0] >= 3: StandardError = Exception + cmp = lambda x, y: (x > y) - (x < y) long = int xrange = range basestring = unicode = str diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -350,7 +350,7 @@ r_sample = getsample(cp, size, i + 1) sample = (l_sample * fac1) + (r_sample * fac2) - sample = clip(sample) + sample = int(clip(sample)) _put_sample(result, size, i // 2, sample) @@ -501,7 +501,7 @@ # slice off extra bytes trim_index = (out_i * bytes_per_frame) - len(retval) - retval = _buffer(retval)[:trim_index] + retval = retval[:trim_index] return (retval, (d, tuple(samps))) diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -187,7 +187,7 @@ No, we found no way of doing that. The JIT generates machine code containing a large number of constant addresses --- constant at the time -the machine code is written. The vast majority is probably not at all +the machine code is generated. The vast majority is probably not at all constants that you find in the executable, with a nice link name. E.g. the addresses of Python classes are used all the time, but Python classes don't come statically from the executable; they are created anew @@ -212,12 +212,16 @@ garbage collection, implementation of various things like arbitrarily long integers, etc. -Currently, we have preliminary versions of a JavaScript interpreter -(Leonardo Santagada as his Summer of PyPy project), a `Prolog interpreter`_ -(Carl Friedrich Bolz as his Bachelor thesis), and a `SmallTalk interpreter`_ +Currently, we have `Topaz`_, a Ruby interpreter; `Hippy`_, a PHP +interpreter; preliminary versions of a `JavaScript interpreter`_ +(Leonardo Santagada as his Summer of PyPy project); a `Prolog interpreter`_ +(Carl Friedrich Bolz as his Bachelor thesis); and a `SmallTalk interpreter`_ (produced during a sprint). 
On the `PyPy bitbucket page`_ there is also a Scheme and an Io implementation; both of these are unfinished at the moment. +.. _`Topaz`: http://topazruby.com/ +.. _`Hippy`: http://morepypy.blogspot.ch/2012/07/hello-everyone.html +.. _`JavaScript interpreter`: https://bitbucket.org/pypy/lang-js/ .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`SmallTalk interpreter`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`PyPy bitbucket page`: https://bitbucket.org/pypy/ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -105,3 +105,6 @@ .. branch: stdlib-2.7.6 Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -330,12 +330,12 @@ raises(UnicodeDecodeError, decode, r"\U00110000") assert decode(r"\U00110000", "ignore") == (u"", 10) assert decode(r"\U00110000", "replace") == (u"\ufffd", 10) - exc = raises(UnicodeDecodeError, unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\U1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\U1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" def test_escape_encode(self): assert '"'.encode('string_escape') == '"' @@ -596,7 +596,7 @@ l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)] return (u"[%s]" % u"".join(l), exc.end) codecs.register_error("test.handler1", handler1) - assert "\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ + assert b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ u"\u3042[<92><117><51>]xxx" def test_encode_error_bad_handler(self): @@ -649,22 +649,22 @@ def test_utf7_errors(self): import codecs tests = [ - ('a\xffb', u'a\ufffdb'), - ('a+IK', u'a\ufffd'), - ('a+IK-b', u'a\ufffdb'), - ('a+IK,b', u'a\ufffdb'), - ('a+IKx', u'a\u20ac\ufffd'), - ('a+IKx-b', u'a\u20ac\ufffdb'), - ('a+IKwgr', u'a\u20ac\ufffd'), - ('a+IKwgr-b', u'a\u20ac\ufffdb'), - ('a+IKwgr,', u'a\u20ac\ufffd'), - ('a+IKwgr,-b', u'a\u20ac\ufffd-b'), - ('a+IKwgrB', u'a\u20ac\u20ac\ufffd'), - ('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), - ('a+/,+IKw-b', u'a\ufffd\u20acb'), - ('a+//,+IKw-b', u'a\ufffd\u20acb'), - ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), - ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a\xffb', u'a\ufffdb'), + (b'a+IK', u'a\ufffd'), + (b'a+IK-b', u'a\ufffdb'), + (b'a+IK,b', u'a\ufffdb'), + (b'a+IKx', u'a\u20ac\ufffd'), + 
(b'a+IKx-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr', u'a\u20ac\ufffd'), + (b'a+IKwgr-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr,', u'a\u20ac\ufffd'), + (b'a+IKwgr,-b', u'a\u20ac\ufffd-b'), + (b'a+IKwgrB', u'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), + (b'a+/,+IKw-b', u'a\ufffd\u20acb'), + (b'a+//,+IKw-b', u'a\ufffd\u20acb'), + (b'a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), ] for raw, expected in tests: raises(UnicodeDecodeError, codecs.utf_7_decode, raw, 'strict', True) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -65,10 +65,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) a = addr.lock(_c.sockaddr_in6) rffi.setintfield(a, 'c_sin6_port', rsocket.htons(port)) rffi.setintfield(a, 'c_sin6_flowinfo', rsocket.htonl(flowinfo)) @@ -97,10 +94,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: return rsocket.UNIXAddress(space.str_w(w_address)) @@ -112,10 +106,16 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): if port < 0 or port > 0xffff: - raise OperationError(space.w_ValueError, space.wrap( + raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) return rffi.cast(rffi.USHORT, port) +def make_unsigned_flowinfo(space, flowinfo): + if flowinfo < 0 or flowinfo > 0xfffff: + raise OperationError(space.w_OverflowError, space.wrap( + "flowinfo must be 0-1048575.")) + return rffi.cast(lltype.Unsigned, flowinfo) + # XXX Hack to seperate rpython and pypy def ipaddr_from_object(space, w_sockaddr): host = space.str_w(space.getitem(w_sockaddr, space.wrap(0))) @@ -536,13 +536,9 @@ @unwrap_spec(family=int, type=int, proto=int) def newsocket(space, w_subtype, family=AF_INET, type=SOCK_STREAM, proto=0): - # XXX If we want to support subclassing the socket type we will need - # something along these lines. But allocate_instance is only defined - # on the standard object space, so this is not really correct. 
- #sock = space.allocate_instance(W_RSocket, w_subtype) - #Socket.__init__(sock, space, fd, family, type, proto) + sock = space.allocate_instance(W_RSocket, w_subtype) try: - sock = W_RSocket(family, type, proto) + W_RSocket.__init__(sock, family, type, proto) except SocketError, e: raise converted_error(space, e) return space.wrap(sock) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -399,7 +399,7 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() - + def test_socket_connect_ex(self): import _socket s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) @@ -425,8 +425,13 @@ def test_bigport(self): import _socket s = _socket.socket() - raises(ValueError, s.connect, ("localhost", 1000000)) - raises(ValueError, s.connect, ("localhost", -1)) + exc = raises(OverflowError, s.connect, ("localhost", -1)) + assert "port must be 0-65535." in str(exc.value) + exc = raises(OverflowError, s.connect, ("localhost", 1000000)) + assert "port must be 0-65535." in str(exc.value) + s = _socket.socket(_socket.AF_INET6) + exc = raises(OverflowError, s.connect, ("::1", 1234, 1048576)) + assert "flowinfo must be 0-1048575." in str(exc.value) def test_NtoH(self): import sys @@ -474,6 +479,13 @@ import socket s = socket.socket() + def test_subclass(self): + from _socket import socket + class MySock(socket): + blah = 123 + s = MySock() + assert s.blah == 123 + def test_getsetsockopt(self): import _socket as socket import struct @@ -575,11 +587,11 @@ class AppTestSocketTCP: + HOST = 'localhost' + def setup_class(cls): cls.space = space - HOST = 'localhost' - def setup_method(self, method): w_HOST = space.wrap(self.HOST) self.w_serv = space.appexec([w_socket, w_HOST], @@ -589,6 +601,7 @@ serv.listen(1) return serv ''') + def teardown_method(self, method): if hasattr(self, 'w_serv'): space.appexec([self.w_serv], '(serv): serv.close()') @@ -609,7 +622,7 @@ raises(error, raise_error) def test_recv_send_timeout(self): - from _socket import socket, timeout + from _socket import socket, timeout, SOL_SOCKET, SO_RCVBUF, SO_SNDBUF cli = socket() cli.connect(self.serv.getsockname()) t, addr = self.serv.accept() @@ -629,6 +642,9 @@ assert count is None buf = t.recv(1) assert buf == '?' 
+ # speed up filling the buffers + t.setsockopt(SOL_SOCKET, SO_RCVBUF, 4096) + cli.setsockopt(SOL_SOCKET, SO_SNDBUF, 4096) # test send() timeout count = 0 try: @@ -656,7 +672,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes = cli.recv_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -671,7 +687,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes, addr = cli.recvfrom_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -682,6 +698,7 @@ cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) assert cli.family == socket.AF_INET + class AppTestErrno: def setup_class(cls): cls.space = space diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -20,7 +20,7 @@ #define PyArrayObject PyObject #define PyArray_Descr PyObject -extern PyTypeObject PyArray_Type; +PyAPI_DATA(PyTypeObject) PyArray_Type; typedef unsigned char npy_bool; typedef unsigned char npy_uint8; diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -213,7 +213,7 @@ if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return 0 + return lltype.nullptr(PyGILState_STATE.TO) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,10 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # prevent linking with PythonXX.lib + w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] + kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % + (space.int_w(w_maj), space.int_w(w_min))] elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -14,12 +14,12 @@ assert 'foo' in sys.modules assert "copy" in dir(module.fooType) obj = module.new() - print obj.foo + print(obj.foo) assert obj.foo == 42 - print "Obj has type", type(obj) + print("Obj has type", type(obj)) assert type(obj) is module.fooType - print "type of obj has type", type(type(obj)) - print "type of type of obj has type", type(type(type(obj))) + print("type of obj has type", type(type(obj))) + print("type of type of obj has type", type(type(type(obj)))) assert module.fooType.__doc__ == "foo is for testing." def test_typeobject_method_descriptor(self): @@ -36,7 +36,7 @@ assert repr(module.fooType.__call__) == "" assert obj2(foo=1, bar=2) == dict(foo=1, bar=2) - print obj.foo + print(obj.foo) assert obj.foo == 42 assert obj.int_member == obj.foo @@ -592,5 +592,5 @@ def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') - print 'calling module.Type()...' 
+ print('calling module.Type()...') module.Type("X", (object,), {}) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -6,23 +6,26 @@ interpleveldefs = { 'ndarray': 'ndarray.W_NDimArray', 'dtype': 'descriptor.W_Dtype', + 'flatiter': 'flatiter.W_FlatIterator', + '_reconstruct' : 'ndarray._reconstruct', + 'scalar' : 'ctors.build_scalar', 'array': 'ctors.array', 'zeros': 'ctors.zeros', 'empty': 'ctors.zeros', 'empty_like': 'ctors.empty_like', - '_reconstruct' : 'ndarray._reconstruct', - 'scalar' : 'ctors.build_scalar', + 'fromstring': 'ctors.fromstring', + + 'concatenate': 'arrayops.concatenate', + 'count_nonzero': 'arrayops.count_nonzero', 'dot': 'arrayops.dot', - 'fromstring': 'ctors.fromstring', - 'flatiter': 'flatiter.W_FlatIterator', - 'concatenate': 'arrayops.concatenate', 'where': 'arrayops.where', - 'count_nonzero': 'arrayops.count_nonzero', 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } + for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: + interpleveldefs[c] = 'space.wrap(constants.%s)' % c class UMathModule(MixedModule): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -58,10 +58,10 @@ elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, - backstrides, storage=storage) + backstrides, storage=storage) else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, - backstrides, storage) + backstrides, storage) if w_subtype: w_ret = space.allocate_instance(W_NDimArray, w_subtype) W_NDimArray.__init__(w_ret, impl) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -87,7 +87,8 @@ value[0] = self.value builder = StringBuilder() - builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.value))) + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), + rffi.sizeof(lltype.typeOf(self.value))) ret = builder.build() lltype.free(value, flavor="raw") @@ -117,7 +118,8 @@ value[1] = self.imag builder = StringBuilder() - builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.real)) * 2) + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), + rffi.sizeof(lltype.typeOf(self.real)) * 2) ret = builder.build() lltype.free(value, flavor="raw") @@ -186,27 +188,27 @@ dtype = self.get_dtype(space) return space.wrap(dtype.itemtype.bool(self)) + def _unaryop_impl(ufunc_name): + def impl(self, space, w_out=None): + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_out]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_other, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call( + space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [w_other, self, w_out]) + return getattr(ufuncs.get(space), 
ufunc_name).call( + space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) - def _unaryop_impl(ufunc_name): - def impl(self, space, w_out=None): - from pypy.module.micronumpy import ufuncs - return getattr(ufuncs.get(space), ufunc_name).call(space, - [self, w_out]) - return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) - descr_add = _binop_impl("add") descr_sub = _binop_impl("subtract") descr_mul = _binop_impl("multiply") diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -368,13 +368,10 @@ class ConcreteArray(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE)): - null_storage = lltype.nullptr(RAW_STORAGE) + if storage == lltype.nullptr(RAW_STORAGE): + storage = dtype.itemtype.malloc(support.product(shape) * dtype.elsize) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, - null_storage) - if storage == lltype.nullptr(RAW_STORAGE): - self.storage = dtype.itemtype.malloc(self.size) - else: - self.storage = storage + storage) def __del__(self): free_raw_storage(self.storage, track_allocation=False) diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -1,3 +1,5 @@ +MAXDIMS = 32 + BOOL = 0 BYTE = 1 UBYTE = 2 diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -5,7 +5,6 @@ from pypy.module.micronumpy import descriptor, loop, ufuncs from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter -from pypy.module.micronumpy.strides import find_shape_and_elems def build_scalar(space, w_dtype, w_state): @@ -27,6 +26,8 @@ @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, ndmin=0): + from pypy.module.micronumpy import strides + # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): w___array__ = space.lookup(w_object, "__array__") @@ -68,12 +69,9 @@ return w_ret # not an array or incorrect dtype - shape, elems_w = find_shape_and_elems(space, w_object, dtype) + shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - for w_elem in elems_w: - if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): - w_elem = w_elem.get_scalar_value() - dtype = ufuncs.find_dtype_for_scalar(space, w_elem, dtype) + dtype = strides.find_dtype_for_seq(space, elems_w, dtype) if dtype is None: dtype = descriptor.get_dtype_cache(space).w_float64dtype elif dtype.is_str_or_unicode() and dtype.elsize < 1: @@ -83,10 +81,10 @@ if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - arr_iter = w_arr.create_iter() - for w_elem in elems_w: - arr_iter.setitem(dtype.coerce(space, w_elem)) - arr_iter.next() + if len(elems_w) == 1: + w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) + else: + loop.assign(space, w_arr, elems_w) return w_arr diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ 
b/pypy/module/micronumpy/loop.py @@ -164,6 +164,13 @@ arr_iter.setitem(box) arr_iter.next() +def assign(space, arr, seq): + arr_iter = arr.create_iter() + arr_dtype = arr.get_dtype() + for item in seq: + arr_iter.setitem(arr_dtype.coerce(space, item)) + arr_iter.next() + where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], reds = 'auto') diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1045,6 +1045,26 @@ value = self.get_scalar_value() return space.float(value) + def descr_hex(self, space): + if self.get_size() != 1: + raise oefmt(space.w_TypeError, + "only length-1 arrays can be converted to Python scalars") + if not self.get_dtype().is_int(): + raise oefmt(space.w_TypeError, + "don't know how to convert scalar number to hex") + value = self.get_scalar_value() + return space.hex(value) + + def descr_oct(self, space): + if self.get_size() != 1: + raise oefmt(space.w_TypeError, + "only length-1 arrays can be converted to Python scalars") + if not self.get_dtype().is_int(): + raise oefmt(space.w_TypeError, + "don't know how to convert scalar number to oct") + value = self.get_scalar_value() + return space.oct(value) + def descr_index(self, space): if self.get_size() != 1 or \ not self.get_dtype().is_int() or self.get_dtype().is_bool(): @@ -1237,6 +1257,8 @@ __int__ = interp2app(W_NDimArray.descr_int), __long__ = interp2app(W_NDimArray.descr_long), __float__ = interp2app(W_NDimArray.descr_float), + __hex__ = interp2app(W_NDimArray.descr_hex), + __oct__ = interp2app(W_NDimArray.descr_oct), __buffer__ = interp2app(W_NDimArray.descr_get_data), __index__ = interp2app(W_NDimArray.descr_index), diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -181,6 +181,10 @@ return [], [w_iterable] if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): return [], [w_iterable] + return _find_shape_and_elems(space, w_iterable, is_rec_type) + + +def _find_shape_and_elems(space, w_iterable, is_rec_type): shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) while True: @@ -210,6 +214,25 @@ batch = new_batch +def find_dtype_for_seq(space, elems_w, dtype): + from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar + if len(elems_w) == 1: + w_elem = elems_w[0] + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + return find_dtype_for_scalar(space, w_elem, dtype) + return _find_dtype_for_seq(space, elems_w, dtype) + + +def _find_dtype_for_seq(space, elems_w, dtype): + from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar + for w_elem in elems_w: + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + dtype = find_dtype_for_scalar(space, w_elem, dtype) + return dtype + + def to_coords(space, shape, size, order, w_item_or_slice): '''Returns a start coord, step, and length. 
''' diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -246,6 +246,13 @@ return CustomIntObject(value) + def test_constants(self): + import numpy as np + assert np.MAXDIMS is 32 + assert np.CLIP is 0 + assert np.WRAP is 1 + assert np.RAISE is 2 + def test_ndarray(self): from numpy import ndarray, array, dtype, flatiter @@ -2276,6 +2283,30 @@ exc = raises(TypeError, "float(np.array([1.5, 2.5]))") assert exc.value[0] == 'only length-1 arrays can be converted to Python scalars' + def test__hex__(self): + import numpy as np + assert hex(np.array(True)) == '0x1' + assert hex(np.array(15)) == '0xf' + assert hex(np.array([15])) == '0xf' + exc = raises(TypeError, "hex(np.array(1.5))") + assert str(exc.value) == "don't know how to convert scalar number to hex" + exc = raises(TypeError, "hex(np.array('15'))") + assert str(exc.value) == "don't know how to convert scalar number to hex" + exc = raises(TypeError, "hex(np.array([1, 2]))") + assert str(exc.value) == "only length-1 arrays can be converted to Python scalars" + + def test__oct__(self): + import numpy as np + assert oct(np.array(True)) == '01' + assert oct(np.array(15)) == '017' + assert oct(np.array([15])) == '017' + exc = raises(TypeError, "oct(np.array(1.5))") + assert str(exc.value) == "don't know how to convert scalar number to oct" + exc = raises(TypeError, "oct(np.array('15'))") + assert str(exc.value) == "don't know how to convert scalar number to oct" + exc = raises(TypeError, "oct(np.array([1, 2]))") + assert str(exc.value) == "only length-1 arrays can be converted to Python scalars" + def test__reduce__(self): from numpypy import array, dtype from cPickle import loads, dumps diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -480,23 +480,19 @@ def test_flat_setitem(self): result = self.run("flat_setitem") assert result == 1.0 - py.test.skip("don't run for now") self.check_trace_count(1) - # XXX not ideal, but hey, let's ignore it for now - self.check_simple_loop({'raw_load': 1, - 'raw_store': 1, - 'int_lt': 1, - 'int_gt': 1, - 'int_add': 4, - 'guard_true': 2, - 'arraylen_gc': 2, - 'jump': 1, - 'int_sub': 1, - # XXX bad part - 'int_and': 1, - 'int_mod': 1, - 'int_rshift': 1, - }) + self.check_simple_loop({ + 'call': 2, + 'getfield_gc': 2, + 'guard_no_exception': 2, + 'guard_not_invalidated': 1, + 'guard_true': 1, + 'int_gt': 1, + 'int_sub': 1, + 'jump': 1, + 'raw_load': 1, + 'raw_store': 1, + }) def define_dot(): return """ @@ -509,6 +505,7 @@ def test_dot(self): result = self.run("dot") assert result == 184 + self.check_trace_count(3) self.check_simple_loop({'float_add': 1, 'float_mul': 1, 'guard_not_invalidated': 1, @@ -526,7 +523,7 @@ 'guard_class': 4, 'guard_false': 2, 'guard_no_exception': 3, - 'guard_nonnull': 8, + 'guard_nonnull': 12, 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 9, diff --git a/pypy/module/pypyjit/test/test_ztranslation.py b/pypy/module/pypyjit/test/test_ztranslation.py --- a/pypy/module/pypyjit/test/test_ztranslation.py +++ b/pypy/module/pypyjit/test/test_ztranslation.py @@ -1,5 +1,5 @@ +from pypy.objspace.fake.checkmodule import checkmodule -from pypy.objspace.fake.checkmodule import checkmodule def test_pypyjit_translates(): checkmodule('pypyjit') diff --git 
a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -7,13 +7,13 @@ from rpython.tool.udir import udir from rpython.tool import logparser from rpython.jit.tool.jitoutput import parse_prof -from pypy.module.pypyjit.test_pypy_c.model import (Log, find_ids_range, - find_ids, - OpMatcher, InvalidMatch) +from pypy.module.pypyjit.test_pypy_c.model import \ + Log, find_ids_range, find_ids, OpMatcher, InvalidMatch + class BaseTestPyPyC(object): log_string = 'jit-log-opt,jit-log-noopt,jit-log-virtualstate,jit-summary' - + def setup_class(cls): if '__pypy__' not in sys.builtin_module_names: py.test.skip("must run this test with pypy") @@ -98,7 +98,6 @@ class TestLog(object): - def test_find_ids_range(self): def f(): a = 0 # ID: myline @@ -127,7 +126,6 @@ class TestOpMatcher_(object): - def match(self, src1, src2, **kwds): from rpython.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) @@ -347,7 +345,6 @@ class TestRunPyPyC(BaseTestPyPyC): - def test_run_function(self): def f(a, b): return a+b @@ -385,7 +382,7 @@ assert len(loops) == 1 assert loops[0].filename == self.filepath assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 - assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 @@ -454,7 +451,6 @@ # ops = loop.ops_by_id('foo', opcode='INPLACE_SUBTRACT') assert log.opnames(ops) == ['int_sub_ovf', 'guard_no_overflow'] - def test_inlined_function(self): def f(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -341,13 +341,11 @@ guard_value(p166, ConstPtr(ptr72), descr=...) p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) guard_no_exception(descr=...) - i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) - setfield_gc(p167, 0, descr=) - setfield_gc(p167, ConstPtr(ptr86), descr=) - guard_no_exception(descr=...) + i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) i169 = int_add(i168, i97) i170 = int_sub(i160, i106) setfield_gc(p167, i168, descr=) + setfield_gc(p167, ConstPtr(null), descr=) setfield_gc(p167, ConstPtr(ptr89), descr=) i171 = uint_gt(i170, i108) guard_false(i171, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -43,6 +43,7 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) + skip('used to pass on 69421-f3e717c94913') assert loop.match(""" i81 = int_lt(i76, 300) guard_true(i81, descr=...) 
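[Editor's note, not part of the archived messages: several changesets in this batch lean on RPython's argument-type enforcement -- the @enforceargs decorator and the _annenforceargs_ attribute that appear in the rbuilder/rstr helper diffs and in the now-unskipped test_sig_bug from test_annrpython. The sketch below is only an illustration of that idiom, with made-up function names; it is not code from any of these changesets.]

    from rpython.rlib.objectmodel import enforceargs

    # Decorator spelling, as used on the string-builder helpers in the
    # diffs above; a None position is left entirely to the annotator.
    @enforceargs(None, int)
    def repeat(item, n):
        return [item] * n

    # Attribute spelling, as exercised by test_sig_bug: the declared types
    # also cover calls that rely on the default value of y, which is what
    # the updated assertions (knowntype is bool, is_constant) check.
    def is_default(x, y=5):
        return y == 5
    is_default._annenforceargs_ = (int, int)

[End of editor's note.]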
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2872,7 +2872,6 @@ py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) def test_sig_bug(self): - py.test.skip("_annenforceargs_ does not work for default arguments") def g(x, y=5): return y == 5 g._annenforceargs_ = (int, int) @@ -2880,7 +2879,8 @@ return g(x) a = self.RPythonAnnotator() s = a.build_types(fun, [int]) - assert not s.is_constant() + assert s.knowntype is bool + assert s.is_constant() def test_sig_list(self): def g(buf): diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -550,20 +550,18 @@ track_allocation = d.pop('track_allocation', True) if d: raise UnsupportedMallocFlags(d) - TYPE = op.args[0].value if zero: name += '_zero' if add_memory_pressure: name += '_add_memory_pressure' if not track_allocation: name += '_no_track_allocation' + TYPE = op.args[0].value op1 = self.prepare_builtin_call(op, name, args, (TYPE,), TYPE) - if name == 'raw_malloc_varsize': - ITEMTYPE = op.args[0].value.OF - if ITEMTYPE == lltype.Char: - return self._handle_oopspec_call(op1, args, - EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, - EffectInfo.EF_CAN_RAISE) + if name.startswith('raw_malloc_varsize') and TYPE.OF == lltype.Char: + return self._handle_oopspec_call(op1, args, + EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, + EffectInfo.EF_CAN_RAISE) return self.rewrite_op_direct_call(op1) def rewrite_op_malloc_varsize(self, op): @@ -594,7 +592,7 @@ name += '_no_track_allocation' op1 = self.prepare_builtin_call(op, name, [op.args[0]], (STRUCT,), STRUCT) - if name == 'raw_free': + if name.startswith('raw_free'): return self._handle_oopspec_call(op1, [op.args[0]], EffectInfo.OS_RAW_FREE, EffectInfo.EF_CANNOT_RAISE) @@ -840,8 +838,8 @@ RESULT = lltype.Ptr(STRUCT) assert RESULT == op.result.concretetype return self._do_builtin_call(op, 'alloc_with_del', [], - extra = (RESULT, vtable), - extrakey = STRUCT) + extra=(RESULT, vtable), + extrakey=STRUCT) heaptracker.register_known_gctype(self.cpu, vtable, STRUCT) opname = 'new_with_vtable' else: @@ -1240,7 +1238,7 @@ op1 = self.prepare_builtin_call(op, "llong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) if %r == "TO_INT": assert op2.result.concretetype == lltype.Signed return op2 @@ -1272,7 +1270,7 @@ op1 = self.prepare_builtin_call(op, "ullong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) return op2 ''' % (_op, _oopspec.lower(), _oopspec)).compile() diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -60,7 +60,7 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, **kwds): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None): return 'calldescr' def calldescr_canraise(self, calldescr): return True diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py 
@@ -1,7 +1,7 @@ import os from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.metainterp.optimizeopt.util import args_dict_value +from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS @@ -95,6 +95,11 @@ # possible aliasing). self.clear() self._lazy_setfield = None + if optheap.postponed_op: + for a in op.getarglist(): + if a is optheap.postponed_op.result: + optheap.emit_postponed_op() + break optheap.next_optimization.propagate_forward(op) if not can_cache: return @@ -186,6 +191,9 @@ def flush(self): self.cached_dict_reads.clear() self.force_all_lazy_setfields_and_arrayitems() + self.emit_postponed_op() + + def emit_postponed_op(self): if self.postponed_op: postponed_op = self.postponed_op self.postponed_op = None @@ -235,10 +243,7 @@ def emit_operation(self, op): self.emitting_operation(op) - if self.postponed_op: - postponed_op = self.postponed_op - self.postponed_op = None - self.next_optimization.propagate_forward(postponed_op) + self.emit_postponed_op() if (op.is_comparison() or op.getopnum() == rop.CALL_MAY_FORCE or op.is_ovf()): self.postponed_op = op @@ -302,7 +307,7 @@ if descr in self.cached_dict_reads: d = self.cached_dict_reads[descr] else: - d = args_dict_value() + d = args_dict() self.cached_dict_reads[descr] = d try: res_v = d[args] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1660,6 +1660,16 @@ """ self.optimize_loop(ops, ops) + def test_setfield_int_eq_result(self): + # test that the setfield_gc does not end up before int_eq + ops = """ + [p1, i1, i2] + i3 = int_eq(i1, i2) + setfield_gc(p1, i3, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, ops) + def test_duplicate_setfield_aliasing(self): # a case where aliasing issues (and not enough cleverness) mean # that we fail to remove any setfield_gc diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,14 +1,15 @@ import py from rpython.rlib.objectmodel import instantiate +from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt, TreeLoop +from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.optimizeopt import build_opt_chain from rpython.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, convert_old_style_to_targets) -from rpython.jit.metainterp.optimizeopt import build_opt_chain -from rpython.jit.metainterp.optimize import InvalidLoop -from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from rpython.jit.metainterp.history import TreeLoop -from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import \ + FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import rop, opname, oparity -from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData + def test_build_opt_chain(): def 
check(chain, expected_names): @@ -40,7 +41,6 @@ class BaseTestWithUnroll(BaseTest): - enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" def optimize_loop(self, ops, expected, expected_preamble=None, @@ -93,8 +93,8 @@ def raises(self, e, fn, *args): return py.test.raises(e, fn, *args).value + class OptimizeOptTest(BaseTestWithUnroll): - def setup_method(self, meth=None): class FailDescr(compile.ResumeGuardDescr): oparse = None @@ -130,7 +130,6 @@ self.namespace.pop('fdescr', None) self.namespace.pop('fdescr2', None) - def test_simple(self): ops = """ [] @@ -606,9 +605,9 @@ i1 = ptr_eq(p0, NULL) guard_false(i1) [] i2 = ptr_ne(NULL, p0) - guard_true(i0) [] + guard_true(i2) [] i3 = ptr_eq(NULL, p0) - guard_false(i1) [] + guard_false(i3) [] guard_nonnull(p0) [] jump(p0) """ @@ -623,6 +622,30 @@ """ self.optimize_loop(ops, expected, preamble) + def test_nonnull_2(self): + ops = """ + [] + p0 = new_array(5, descr=arraydescr) # forces p0 != NULL + i0 = ptr_ne(p0, NULL) + guard_true(i0) [] + i1 = ptr_eq(p0, NULL) + guard_false(i1) [] + i2 = ptr_ne(NULL, p0) + guard_true(i2) [] + i3 = ptr_eq(NULL, p0) + guard_false(i3) [] + guard_nonnull(p0) [] + escape(p0) + jump() + """ + expected = """ + [] + p0 = new_array(5, descr=arraydescr) + escape(p0) + jump() + """ + self.optimize_loop(ops, expected) + def test_const_guard_value(self): ops = """ [] @@ -974,7 +997,6 @@ """ self.optimize_loop(ops, expected, preamble) - # ---------- def test_virtual_1(self): @@ -1252,7 +1274,6 @@ """ self.optimize_loop(ops, expected, preamble) - def test_virtual_constant_isnonnull(self): ops = """ [i0] @@ -1728,10 +1749,11 @@ # We cannot track virtuals that survive for more than two iterations. self.optimize_loop(ops, expected, preamble) - def test_virtual_raw_malloc(self): + def test_virtual_raw_malloc_basic(self): ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr) call('free', i2, descr=raw_free_descr) @@ -1743,13 +1765,32 @@ """ self.optimize_loop(ops, expected) + def test_virtual_raw_malloc_const(self): + ops = """ + [i1] + i5 = int_mul(10, 1) + i2 = call('malloc', i5, descr=raw_malloc_descr) + guard_no_exception() [] + setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) + i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr) + call('free', i2, descr=raw_free_descr) + jump(i3) + """ + expected = """ + [i1] + jump(i1) + """ + self.optimize_loop(ops, expected) + def test_virtual_raw_malloc_force(self): ops = """ [i1] - i2 = call('malloc', 10, descr=raw_malloc_descr) + i2 = call('malloc', 20, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) setarrayitem_raw(i2, 1, 123, descr=rawarraydescr_char) + setarrayitem_raw(i2, 1, 789, descr=rawarraydescr_float) label('foo') # we expect the buffer to be forced *after* the label escape(i2) call('free', i2, descr=raw_free_descr) @@ -1758,12 +1799,12 @@ expected = """ [i1] label('foo') - i2 = call('malloc', 10, descr=raw_malloc_descr) - setarrayitem_raw(i2, 0, i1, descr=rawarraydescr_char) - i3 = int_add(i2, 1) - setarrayitem_raw(i3, 0, 123, descr=rawarraydescr_char) - i4 = int_add(i2, 2) - setarrayitem_raw(i4, 0, 456, descr=rawarraydescr_char) + i2 = call('malloc', 20, descr=raw_malloc_descr) + #guard_no_exception() [] # XXX should appear + raw_store(i2, 0, i1, descr=rawarraydescr_char) + raw_store(i2, 1, 123, 
descr=rawarraydescr_char) + raw_store(i2, 2, 456, descr=rawarraydescr_char) + raw_store(i2, 8, 789, descr=rawarraydescr_float) escape(i2) call('free', i2, descr=raw_free_descr) jump(i1) @@ -1774,6 +1815,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) label('foo') # we expect the buffer to be forced *after* the label setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) # overlap! @@ -1784,7 +1826,8 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) - setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) + #guard_no_exception() [] # XXX should appear + raw_store(i2, 0, i1, descr=rawarraydescr) setarrayitem_raw(i2, 2, 456, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) jump(i1) @@ -1795,6 +1838,7 @@ ops = """ [i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) label('foo') # we expect the buffer to be forced *after* the label i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) @@ -1805,7 +1849,8 @@ [i1] label('foo') i2 = call('malloc', 10, descr=raw_malloc_descr) - setarrayitem_raw(i2, 0, i1, descr=rawarraydescr) + #guard_no_exception() [] # XXX should appear + raw_store(i2, 0, i1, descr=rawarraydescr) i3 = getarrayitem_raw(i2, 0, descr=rawarraydescr_char) call('free', i2, descr=raw_free_descr) jump(i1) @@ -1816,6 +1861,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 1) # get a slice of the original buffer setarrayitem_raw(i3, 0, 4242, descr=rawarraydescr) # write to the slice @@ -1835,6 +1881,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] i3 = int_add(i2, 1) # get a slice of the original buffer i4 = int_add(i3, 1) # get a slice of a slice setarrayitem_raw(i4, 0, i1, descr=rawarraydescr_char) # write to the slice @@ -1852,6 +1899,7 @@ ops = """ [i0, i1] i2 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) i3 = int_add(i2, 1) # get a slice of the original buffer setarrayitem_raw(i3, 4, 4242, descr=rawarraydescr_char) # write to the slice @@ -1864,9 +1912,9 @@ label('foo') # these ops are generated by VirtualRawBufferValue._really_force i2 = call('malloc', 10, descr=raw_malloc_descr) - setarrayitem_raw(i2, 0, 42, descr=rawarraydescr_char) - i3 = int_add(i2, 5) # 1+4*sizeof(char) - setarrayitem_raw(i3, 0, 4242, descr=rawarraydescr_char) + #guard_no_exception() [] # XXX should appear + raw_store(i2, 0, 42, descr=rawarraydescr_char) + raw_store(i2, 5, 4242, descr=rawarraydescr_char) # this is generated by VirtualRawSliceValue._really_force i4 = int_add(i2, 1) escape(i4) @@ -1881,6 +1929,7 @@ i2 = int_add(i1, 1) call('free', i0, descr=raw_free_descr) i3 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] setarrayitem_raw(i3, 0, i2, descr=rawarraydescr) label('foo') jump(i3) @@ -1892,11 +1941,66 @@ call('free', i0, descr=raw_free_descr) label('foo') i3 = call('malloc', 10, descr=raw_malloc_descr) - setarrayitem_raw(i3, 0, i2, descr=rawarraydescr) + #guard_no_exception() [] # XXX should appear + raw_store(i3, 0, i2, descr=rawarraydescr) jump(i3) """ self.optimize_loop(ops, expected) + def test_virtual_raw_store_raw_load(self): + ops = """ + [i1] + i0 = call('malloc', 10, descr=raw_malloc_descr) + guard_no_exception() [] + raw_store(i0, 
0, i1, descr=rawarraydescr) + i2 = raw_load(i0, 0, descr=rawarraydescr) + i3 = int_add(i1, i2) + call('free', i0, descr=raw_free_descr) + jump(i3) + """ + expected = """ + [i1] + i2 = int_add(i1, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_virtual_raw_store_getarrayitem_raw(self): + ops = """ + [f1] + i0 = call('malloc', 16, descr=raw_malloc_descr) + guard_no_exception() [] + raw_store(i0, 8, f1, descr=rawarraydescr_float) + f2 = getarrayitem_raw(i0, 1, descr=rawarraydescr_float) + f3 = float_add(f1, f2) + call('free', i0, descr=raw_free_descr) + jump(f3) + """ + expected = """ + [f1] + f2 = float_add(f1, f1) + jump(f2) + """ + self.optimize_loop(ops, expected) + + def test_virtual_setarrayitem_raw_raw_load(self): + ops = """ + [f1] + i0 = call('malloc', 16, descr=raw_malloc_descr) + guard_no_exception() [] + setarrayitem_raw(i0, 1, f1, descr=rawarraydescr_float) + f2 = raw_load(i0, 8, descr=rawarraydescr_float) + f3 = float_add(f1, f2) + call('free', i0, descr=raw_free_descr) + jump(f3) + """ + expected = """ + [f1] + f2 = float_add(f1, f1) + jump(f2) + """ + self.optimize_loop(ops, expected) + def test_duplicate_getfield_1(self): ops = """ [p1, p2] @@ -2789,8 +2893,7 @@ p2 = new_with_vtable(ConstClass(node_vtable)) jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_2(self): ops = """ @@ -2801,8 +2904,7 @@ escape(p2) # prevent it from staying Virtual jump(p2) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") def test_invalid_loop_3(self): ops = """ @@ -2824,8 +2926,7 @@ guard_value(p2, ConstPtr(myptr)) [] jump(p2) """ - exc = self.raises(InvalidLoop, self.optimize_loop, - ops, "crash!") + exc = self.raises(InvalidLoop, self.optimize_loop, ops, "crash!") if exc: assert "node" in exc.msg @@ -3151,7 +3252,6 @@ """ self.optimize_loop(ops, expected) - def test_int_and_or_with_zero(self): ops = """ [i0, i1] @@ -5107,7 +5207,6 @@ """ self.optimize_loop(ops, expected) - def test_division_nonneg(self): py.test.skip("harder") # this is how an app-level division turns into right now @@ -5444,7 +5543,6 @@ """ self.optimize_loop(ops, ops, ops) - def test_mul_ovf(self): ops = """ [i0, i1] @@ -5591,7 +5689,6 @@ def is_integer_bounded(self): return False - for n in ('inst_w_seq', 'inst_index', 'inst_w_list', 'inst_length', 'inst_start', 'inst_step'): self.namespace[n] = FakeDescr(n) @@ -5847,7 +5944,7 @@ self.optimize_loop(ops, optops, preamble) # check with replacing 'str' with 'unicode' everywhere def r(s): - return s.replace('str','unicode').replace('s"', 'u"') + return s.replace('str', 'unicode').replace('s"', 'u"') self.optimize_loop(r(ops), r(optops), r(preamble)) def test_newstr_1(self): @@ -6277,7 +6374,7 @@ if isinstance(value, calldescrtype): extra = value.get_extra_info() if (extra and isinstance(extra, effectinfotype) and - extra.oopspecindex == oopspecindex): + extra.oopspecindex == oopspecindex): # returns 0 for 'func' in this test return value, 0 raise AssertionError("not found: oopspecindex=%d" % @@ -7395,7 +7492,6 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_loopinvariant_constant_strgetitem(self): ops = """ [p0] @@ -7454,7 +7550,7 @@ """ self.optimize_loop(ops, expected, expected_short=short) - def test_propagate_virtual_arryalen(self): + def test_propagate_virtual_arraylen(self): ops = """ [p0] p404 = new_array(2, descr=arraydescr) @@ -7831,7 
+7927,6 @@ """ self.optimize_loop(ops, expected) - def test_setarrayitem_followed_by_arraycopy(self): ops = """ [p1, p2] @@ -8124,7 +8219,6 @@ """ self.optimize_loop(ops, expected) - def test_issue1080_infinitie_loop_simple(self): ops = """ [p69] @@ -8149,8 +8243,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_licm_boxed_opaque_getitem(self): ops = """ @@ -8225,8 +8318,7 @@ guard_value(p1, ConstPtr(myptr)) [] jump(p1) """ - self.raises(InvalidLoop, self.optimize_loop, - ops, ops) + self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_cond_call_with_a_constant(self): ops = """ @@ -8253,6 +8345,16 @@ """ self.optimize_loop(ops, expected) + def test_hippyvm_unroll_bug(self): + ops = """ + [p0, i1, i2] + i3 = int_add(i1, 1) + i4 = int_eq(i3, i2) + setfield_gc(p0, i4, descr=valuedescr) + jump(p0, i3, i2) + """ + self.optimize_loop(ops, ops) + + class TestLLtype(OptimizeOptTest, LLtypeMixin): pass - diff --git a/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py b/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py --- a/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_rawbuffer.py @@ -17,8 +17,7 @@ ( 4, 2, 'descr2', 'two'), ( 8, 4, 'descr3', 'three'), (12, 2, 'descr4', 'four'), - ] - # + ] def test_write_value_update(): buf = RawBuffer(FakeCPU()) @@ -28,7 +27,7 @@ assert buf._get_memory() == [ ( 0, 4, 'descr', 'ONE'), ( 4, 2, 'descr', 'two'), - ] + ] def test_write_value_invalid_length(): buf = RawBuffer(FakeCPU()) @@ -38,7 +37,6 @@ with py.test.raises(InvalidRawWrite): buf.write_value(0, 4, 'descr2', 'two') - def test_write_value_overlapping_next(): buf = RawBuffer(FakeCPU()) buf.write_value(0, 4, 'descr', 'one') diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -226,6 +226,8 @@ hints={'nolength': True})) rawarraydescr_char = cpu.arraydescrof(lltype.Array(lltype.Char, hints={'nolength': True})) + rawarraydescr_float = cpu.arraydescrof(lltype.Array(lltype.Float, + hints={'nolength': True})) fc_array = lltype.GcArray( lltype.Struct( diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -1,7 +1,7 @@ import itertools import py -from rpython.rlib.objectmodel import r_dict, compute_identity_hash +from rpython.rlib.objectmodel import r_dict, compute_identity_hash, specialize from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.jit.metainterp import resoperation @@ -118,16 +118,10 @@ res = intmask((1000003 * res) ^ y) return res + at specialize.call_location() def args_dict(): return r_dict(args_eq, args_hash) -def args_dict_box(): - return r_dict(args_eq, args_hash) - -def args_dict_value(): - return r_dict(args_eq, args_hash) - - # ____________________________________________________________ def equaloplists(oplist1, oplist2, strict_fail_args=True, remap={}, @@ -186,4 +180,3 @@ assert False assert len(oplist1) == len(oplist2) return True - diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- 
a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -432,22 +432,13 @@ optforce.emit_operation(self.source_op) self.box = self.source_op.result for i in range(len(self.buffer.offsets)): - # get a pointer to self.box+offset + # write the value offset = self.buffer.offsets[i] - if offset == 0: - arraybox = self.box - else: - arraybox = BoxInt() - op = ResOperation(rop.INT_ADD, - [self.box, ConstInt(offset)], arraybox) - optforce.emit_operation(op) - # - # write the value descr = self.buffer.descrs[i] itemvalue = self.buffer.values[i] itembox = itemvalue.force_box(optforce) - op = ResOperation(rop.SETARRAYITEM_RAW, - [arraybox, ConstInt(0), itembox], None, + op = ResOperation(rop.RAW_STORE, + [self.box, ConstInt(offset), itembox], None, descr=descr) optforce.emit_operation(op) @@ -657,7 +648,6 @@ def optimize_SETFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) - if value.is_virtual(): fieldvalue = self.getvalue(op.getarg(1)) value.setfield(op.getdescr(), fieldvalue) @@ -674,14 +664,8 @@ def optimize_NEW_ARRAY(self, op): sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: - # if the original 'op' did not have a ConstInt as argument, - # build a new one with the ConstInt argument - if not isinstance(op.getarg(0), ConstInt): - op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result, - descr=op.getdescr()) self.make_varray(op.getdescr(), sizebox.getint(), op.result, op) else: - self.getvalue(op.result).ensure_nonnull() self.emit_operation(op) def optimize_CALL(self, op): @@ -699,12 +683,12 @@ self.emit_operation(op) def do_RAW_MALLOC_VARSIZE_CHAR(self, op): - sizebox = op.getarg(1) - if not isinstance(sizebox, ConstInt): + sizebox = self.get_constant_box(op.getarg(1)) + if sizebox is None: self.emit_operation(op) return - size = sizebox.value - self.make_virtual_raw_memory(size, op.result, op) + self.make_virtual_raw_memory(sizebox.getint(), op.result, op) + self.last_emitted_operation = REMOVED def do_RAW_FREE(self, op): value = self.getvalue(op.getarg(1)) @@ -779,11 +763,12 @@ offset, itemsize, descr = self._unpack_arrayitem_raw_op(op, indexbox) try: itemvalue = value.getitem_raw(offset, itemsize, descr) - self.make_equal_to(op.result, itemvalue) except InvalidRawOperation: box = value.force_box(self) op.setarg(0, box) self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) return value.ensure_nonnull() self.emit_operation(op) @@ -805,6 +790,48 @@ value.ensure_nonnull() self.emit_operation(op) + def _unpack_raw_load_store_op(self, op, offsetbox): + offset = offsetbox.getint() + cpu = self.optimizer.cpu + descr = op.getdescr() + itemsize = cpu.unpack_arraydescr_size(descr)[1] + return offset, itemsize, descr + + def optimize_RAW_LOAD(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + try: + itemvalue = value.getitem_raw(offset, itemsize, descr) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + else: + self.make_equal_to(op.result, itemvalue) + return + value.ensure_nonnull() + self.emit_operation(op) + + def optimize_RAW_STORE(self, op): + value = self.getvalue(op.getarg(0)) + if value.is_virtual(): + offsetbox = self.get_constant_box(op.getarg(1)) + if offsetbox is not None: + offset, itemsize, descr = self._unpack_raw_load_store_op(op, offsetbox) + itemvalue = 
self.getvalue(op.getarg(2)) + try: + value.setitem_raw(offset, itemsize, descr, itemvalue) + except InvalidRawOperation: + box = value.force_box(self) + op.setarg(0, box) + self.emit_operation(op) + return + value.ensure_nonnull() + self.emit_operation(op) + def optimize_GETINTERIORFIELD_GC(self, op): value = self.getvalue(op.getarg(0)) if value.is_virtual(): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -11,7 +11,7 @@ ConstFloat, Box, TargetToken) from rpython.jit.metainterp.jitprof import EmptyProfiler from rpython.jit.metainterp.logger import Logger -from rpython.jit.metainterp.optimizeopt.util import args_dict_box +from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.resoperation import rop from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, debug_print, make_sure_not_resized @@ -1656,7 +1656,7 @@ self.forced_virtualizable = None self.partial_trace = None self.retracing_from = -1 - self.call_pure_results = args_dict_box() + self.call_pure_results = args_dict() self.heapcache = HeapCache() self.call_ids = [] @@ -1784,16 +1784,14 @@ moreargs = [box] + extraargs else: moreargs = list(extraargs) - metainterp_sd = self.staticdata if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: - resumedescr = compile.ResumeGuardForcedDescr(metainterp_sd, + resumedescr = compile.ResumeGuardForcedDescr(self.staticdata, self.jitdriver_sd) elif opnum == rop.GUARD_NOT_INVALIDATED: resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() - guard_op = self.history.record(opnum, moreargs, None, - descr=resumedescr) + guard_op = self.history.record(opnum, moreargs, None, descr=resumedescr) self.capture_resumedata(resumedescr, resumepc) self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -119,10 +119,11 @@ descr = self.getdescr() if descr is None or we_are_translated(): return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in args])) + ', '.join([str(a) for a in args])) else: - return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(), - ', '.join([str(a) for a in args]), descr) + return '%s%s%s(%s)' % (prefix, sres, self.getopname(), + ', '.join([str(a) for a in args] + + ['descr=%r' % descr])) def getopname(self): try: diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -1,7 +1,8 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, - free_raw_storage, raw_storage_getitem) + free_raw_storage, raw_storage_getitem) + class RawMemTests(object): def test_cast_void_ptr(self): @@ -44,6 +45,7 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_float(self): def f(): @@ -57,6 +59,7 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 
1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) def test_raw_storage_byte(self): def f(): @@ -70,6 +73,21 @@ self.check_operations_history({'call': 2, 'guard_no_exception': 1, 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) + + def test_raw_storage_options(self): + def f(): + p = alloc_raw_storage(15, track_allocation=False, zero=True) + raw_storage_setitem(p, 3, 24) + res = raw_storage_getitem(lltype.Signed, p, 3) + free_raw_storage(p, track_allocation=False) + return res + res = self.interp_operations(f, []) + assert res == 24 + self.check_operations_history({'call': 2, 'guard_no_exception': 1, + 'raw_store': 1, 'raw_load': 1, + 'finish': 1}) + self.metainterp.staticdata.stats.check_resops({'finish': 1}, omit_finish=False) class TestRawMem(RawMemTests, LLJitMixin): diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -1,6 +1,7 @@ import py +import re from rpython.jit.metainterp import resoperation as rop -from rpython.jit.metainterp.history import AbstractDescr +from rpython.jit.metainterp.history import AbstractDescr, AbstractFailDescr def test_arity_mixins(): cases = [ @@ -55,12 +56,18 @@ op = rop.ResOperation(rop.rop.INT_ADD, ['a', 'b'], 'c') assert op.getarglist() == ['a', 'b'] assert op.result == 'c' + assert repr(op) == "c = int_add(a, b)" mydescr = AbstractDescr() op = rop.ResOperation(rop.rop.CALL, ['a', 'b'], 'c', descr=mydescr) assert op.getarglist() == ['a', 'b'] assert op.result == 'c' assert op.getdescr() is mydescr + assert re.match("c = call\(a, b, descr=<.+>\)$", repr(op)) + + mydescr = AbstractFailDescr() + op = rop.ResOperation(rop.rop.GUARD_NO_EXCEPTION, [], None, descr=mydescr) + assert re.match("guard_no_exception\(descr=<.+>\)$", repr(op)) def test_can_malloc(): mydescr = AbstractDescr() diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1150,7 +1150,7 @@ res = self.meta_interp(f, [10]) assert res == 55 self.check_trace_count(1) - self.check_resops(setarrayitem_raw=0, getarrayitem_raw=0) + self.check_resops({'guard_true': 2, 'int_add': 4, 'int_lt': 2, 'jump': 1}) def test_raw_malloc_resume(self): mydriver = JitDriver(greens=[], reds = 'auto') @@ -1171,8 +1171,10 @@ assert f(10) == 4000+55 res = self.meta_interp(f, [10]) assert res == 4000+55 - # the getarrayitem_raw is in the bridge - self.check_resops(getarrayitem_raw=1, setarrayitem_raw=0) + self.check_trace_count(2) + self.check_resops({'guard_false': 2, 'guard_true': 5, + 'int_add': 8, 'int_gt': 3, 'int_lt': 4, 'int_mul': 2, + 'jump': 2}) def test_raw_malloc_no_virtualstate(self): mydriver = JitDriver(greens=[], reds = 'auto') @@ -1194,7 +1196,7 @@ assert res == 45 # make sure that the raw buffer is *not* virtualized because we do not # support virtualstate - self.check_resops(getarrayitem_raw=2, setarrayitem_raw=2) + self.check_resops(getarrayitem_raw=2, raw_store=2) def test_raw_malloc_only_chars(self): mydriver = JitDriver(greens=[], reds = 'auto') diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -132,8 +132,8 @@ The actual return value may be determined with os.WEXITSTATUS. 
""" + res = 0 ll_f = self.ll_file - res = 0 if ll_f: # double close is allowed self.ll_file = lltype.nullptr(FILE) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,9 +1,9 @@ - import os, sys, py from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile + class TestFile(BaseRtypingTest): def setup_class(cls): cls.tmpdir = udir.join('test_rfile') @@ -208,6 +208,7 @@ assert s == "%s\n" % printval assert os.WEXITSTATUS(r) == retval + class TestPopenR(BaseRtypingTest): def setup_class(cls): if sys.platform == 'win32': diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -108,6 +108,8 @@ copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) @jit.dont_look_inside + @signature(types.any(), types.any(), types.int(), types.int(), + returns=types.none()) def copy_raw_to_string(ptrsrc, dst, dststart, length): # xxx Warning: same note as above apply: don't do this at home assert length >= 0 diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -369,32 +369,21 @@ for rule in rules: m.rule(*rule) - objects = ' $(OBJECTS)' From noreply at buildbot.pypy.org Mon Mar 17 21:39:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 21:39:52 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: cleanup/reduce diff with default Message-ID: <20140317203952.98AC21C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: improve-consecutive-dict-lookups Changeset: r70029:0f464df7cb9f Date: 2014-03-17 15:41 -0400 http://bitbucket.org/pypy/pypy/changeset/0f464df7cb9f/ Log: cleanup/reduce diff with default diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -302,13 +302,12 @@ self.emit_operation(op) def _optimize_CALL_DICT_LOOKUP(self, op): - args = self.optimizer.make_args_key(op) descr = op.getdescr().get_extra_info().extradescr if descr in self.cached_dict_reads: d = self.cached_dict_reads[descr] else: - d = args_dict() - self.cached_dict_reads[descr] = d + d = self.cached_dict_reads[descr] = args_dict() + args = self.optimizer.make_args_key(op) try: res_v = d[args] except KeyError: diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -212,7 +212,6 @@ self.check_simple_loop(call=1, getinteriorfield_gc=2, guard_no_exception=1) - def test_ordered_dict_two_lookups(self): driver = JitDriver(greens = [], reds = 'auto') d = OrderedDict() @@ -273,6 +272,7 @@ assert res == f(10) self.check_simple_loop(call=3) + class TestLLtype(DictTests, LLJitMixin): pass diff --git a/rpython/rtyper/lltypesystem/rbuilder.py b/rpython/rtyper/lltypesystem/rbuilder.py --- a/rpython/rtyper/lltypesystem/rbuilder.py +++ b/rpython/rtyper/lltypesystem/rbuilder.py @@ -85,6 +85,13 @@ ll_builder.used = needed @staticmethod + def ll_append_char(ll_builder, char): + if ll_builder.used == ll_builder.allocated: + ll_builder.grow(ll_builder, 1) + 
ll_builder.buf.chars[ll_builder.used] = char + ll_builder.used += 1 + + @staticmethod def ll_append_slice(ll_builder, ll_str, start, end): needed = end - start used = ll_builder.used @@ -106,7 +113,6 @@ ll_builder.used = used @staticmethod - @enforceargs(None, None, int) def ll_append_charpsize(ll_builder, charp, size): used = ll_builder.used if used + size > ll_builder.allocated: @@ -132,14 +138,6 @@ return ll_builder != nullptr(cls.lowleveltype.TO) class StringBuilderRepr(BaseStringBuilderRepr): - @staticmethod - @enforceargs(None, lltype.Char) - def ll_append_char(ll_builder, char): - if ll_builder.used == ll_builder.allocated: - ll_builder.grow(ll_builder, 1) - ll_builder.buf.chars[ll_builder.used] = char - ll_builder.used += 1 - lowleveltype = lltype.Ptr(STRINGBUILDER) basetp = STR mallocfn = staticmethod(rstr.mallocstr) @@ -150,15 +148,6 @@ ) class UnicodeBuilderRepr(BaseStringBuilderRepr): - - @staticmethod - @enforceargs(None, lltype.UniChar) - def ll_append_char(ll_builder, char): - if ll_builder.used == ll_builder.allocated: - ll_builder.grow(ll_builder, 1) - ll_builder.buf.chars[ll_builder.used] = char - ll_builder.used += 1 - lowleveltype = lltype.Ptr(UNICODEBUILDER) basetp = UNICODE mallocfn = staticmethod(rstr.mallocunicode) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -4,7 +4,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import ll_assert from rpython.rlib.objectmodel import (malloc_zero_filled, we_are_translated, - _hash_string, keepalive_until_here, specialize, enforceargs) + _hash_string, keepalive_until_here, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError @@ -121,10 +121,9 @@ llmemory.raw_memcopy(srcbuf, dst, llmemory.sizeof(CHAR_TP) * length) # end of "no GC" section keepalive_until_here(dst) + copy_raw_to_string._always_inline_ = True copy_raw_to_string = func_with_new_name(copy_raw_to_string, 'copy_raw_to_%s' % name) - copy_raw_to_string._always_inline_ = True - copy_raw_to_string._annenforceargs_ = (None, None, int, int) return copy_string_to_raw, copy_raw_to_string, copy_string_contents From noreply at buildbot.pypy.org Mon Mar 17 21:39:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 21:39:53 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: filter guards like the other optimizers Message-ID: <20140317203953.DB5301C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: improve-consecutive-dict-lookups Changeset: r70030:ecaad25d95ac Date: 2014-03-17 15:19 -0400 http://bitbucket.org/pypy/pypy/changeset/ecaad25d95ac/ Log: filter guards like the other optimizers diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -4,7 +4,7 @@ from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException -from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS +from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, 
ResOperation from rpython.rlib.objectmodel import we_are_translated @@ -182,7 +182,6 @@ self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False self.postponed_op = None - self.remove_next_guard = False def force_at_end_of_preamble(self): self.cached_dict_reads.clear() @@ -312,16 +311,14 @@ res_v = d[args] except KeyError: d[args] = self.getvalue(op.result) - res = False + return False else: self.make_equal_to(op.result, res_v) - self.remove_next_guard = True - res = True - return res + self.last_emitted_operation = REMOVED + return True def optimize_GUARD_NO_EXCEPTION(self, op): - if self.remove_next_guard: - self.remove_next_guard = False + if self.last_emitted_operation is REMOVED: return self.emit_operation(op) From noreply at buildbot.pypy.org Mon Mar 17 22:23:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 17 Mar 2014 22:23:26 +0100 (CET) Subject: [pypy-commit] pypy default: allow pytineractive to use ssl and thread modules on win32 Message-ID: <20140317212326.E99451C11A4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70031:9a94ce726040 Date: 2014-03-17 23:22 +0200 http://bitbucket.org/pypy/pypy/changeset/9a94ce726040/ Log: allow pytineractive to use ssl and thread modules on win32 diff --git a/pypy/module/_ssl/thread_lock.py b/pypy/module/_ssl/thread_lock.py --- a/pypy/module/_ssl/thread_lock.py +++ b/pypy/module/_ssl/thread_lock.py @@ -1,6 +1,7 @@ from rpython.rlib.ropenssl import * from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo +import sys # CRYPTO_set_locking_callback: # @@ -64,11 +65,17 @@ from rpython.rlib import rthread +libraries = [] +if sys.platform == 'win32': + # XXX Not needed for mingw32... + libraries = ['libeay32', 'user32', 'advapi32', 'gdi32'] + eci = rthread.eci.merge(ExternalCompilationInfo( separate_module_sources=[separate_module_source], post_include_bits=[ "int _PyPy_SSL_SetupThreads(void);"], export_symbols=['_PyPy_SSL_SetupThreads'], + libraries = libraries, )) _PyPy_SSL_SetupThreads = rffi.llexternal('_PyPy_SSL_SetupThreads', From noreply at buildbot.pypy.org Mon Mar 17 22:45:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 22:45:21 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140317214521.D53B11C11A4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70032:8984f0d53705 Date: 2014-03-17 17:35 -0400 http://bitbucket.org/pypy/pypy/changeset/8984f0d53705/ Log: cleanup diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -207,14 +207,13 @@ ... """) ops = loop.ops_by_id('cfficall') - assert 'raw_malloc' not in str(ops) - assert 'raw_free' not in str(ops) - assert 'getarrayitem_raw' not in log.opnames(ops) - assert 'setarrayitem_raw' not in log.opnames(ops) + for name in ['raw_malloc', 'raw_free']: + assert name not in str(ops) + for name in ['raw_load', 'raw_store', 'getarrayitem_raw', 'setarrayitem_raw']: + assert name not in log.opnames(ops) # so far just check that call_release_gil() is produced. 
# later, also check that the arguments to call_release_gil() # are constants - # are constants, and that the numerous raw_mallocs are removed def test_cffi_call_guard_not_forced_fails(self): # this is the test_pypy_c equivalent of From noreply at buildbot.pypy.org Mon Mar 17 22:45:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 22:45:23 +0100 (CET) Subject: [pypy-commit] pypy default: get libraries from ropenssl Message-ID: <20140317214523.2A4801C11A4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70033:56b6b496d480 Date: 2014-03-17 17:44 -0400 http://bitbucket.org/pypy/pypy/changeset/56b6b496d480/ Log: get libraries from ropenssl diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,5 +1,5 @@ class AppTestSSL: - spaceconfig = dict(usemodules=('_ssl', '_socket')) + spaceconfig = dict(usemodules=('_ssl', '_socket', 'thread')) def setup_class(cls): import os diff --git a/pypy/module/_ssl/thread_lock.py b/pypy/module/_ssl/thread_lock.py --- a/pypy/module/_ssl/thread_lock.py +++ b/pypy/module/_ssl/thread_lock.py @@ -1,7 +1,7 @@ -from rpython.rlib.ropenssl import * +from rpython.rlib import rthread +from rpython.rlib.ropenssl import libraries from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo -import sys # CRYPTO_set_locking_callback: # @@ -23,7 +23,6 @@ # without caring about the GIL. separate_module_source = """ - #include static unsigned int _ssl_locks_count = 0; @@ -63,13 +62,6 @@ } """ -from rpython.rlib import rthread - -libraries = [] -if sys.platform == 'win32': - # XXX Not needed for mingw32... - libraries = ['libeay32', 'user32', 'advapi32', 'gdi32'] - eci = rthread.eci.merge(ExternalCompilationInfo( separate_module_sources=[separate_module_source], post_include_bits=[ From noreply at buildbot.pypy.org Mon Mar 17 23:19:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 23:19:03 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140317221903.5796C1C12F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70034:8475b0021a83 Date: 2014-03-17 18:07 -0400 http://bitbucket.org/pypy/pypy/changeset/8475b0021a83/ Log: cleanup diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -7,12 +7,13 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.codewriter import heaptracker + class VirtualTests: def _freeze_(self): return True def test_virtualized1(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = self._new() node.value = 0 @@ -33,9 +34,8 @@ self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) - def test_virtualized2(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node1', 'node2']) def f(n): node1 = self._new() node1.value = 0 @@ -59,7 +59,7 @@ def test_virtualized_circular1(self): class MyNode(): pass - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = MyNode() node.value = 0 @@ -79,11 +79,11 @@ res = self.meta_interp(f, [10]) assert res == 55 * 10 
self.check_trace_count(1) - self.check_resops(new_with_vtable=0, setfield_gc=0, - getfield_gc=3, new=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=3, + new=0) def test_virtualized_float(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = self._new() node.floatval = 0.0 @@ -100,7 +100,7 @@ self.check_resops(new=0, float_add=1) def test_virtualized_float2(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = self._new() node.floatval = 0.0 @@ -117,9 +117,8 @@ self.check_trace_count(1) self.check_resops(new=0, float_add=2) - def test_virtualized_2(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = self._new() node.value = 0 @@ -144,7 +143,7 @@ new=0) def test_nonvirtual_obj_delays_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) node0 = self._new() node0.value = 10 def f(n): @@ -161,11 +160,11 @@ res = self.meta_interp(f, [500]) assert res == 640 self.check_trace_count(1) - self.check_resops(new_with_vtable=0, setfield_gc=0, - getfield_gc=1, new=0) + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=1, + new=0) def test_two_loops_with_virtual(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = self._new() node.value = 0 @@ -185,12 +184,11 @@ res = self.meta_interp(f, [18]) assert res == f(18) self.check_trace_count(2) - self.check_resops(new_with_vtable=0, setfield_gc=0, - getfield_gc=2, new=0) - + self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, + new=0) def test_two_loops_with_escaping_virtual(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def externfn(node): llop.debug_print(lltype.Void, compute_unique_id(node), node.value, node.extra) @@ -218,7 +216,7 @@ self.check_resops(int_mul=0, call=1) def test_two_virtuals(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'prev']) + myjitdriver = JitDriver(greens=[], reds=['n', 'prev']) class Foo(object): def __init__(self, x, y): self.x = x @@ -241,7 +239,7 @@ self.check_resops(new_with_vtable=0, new=0) def test_specialied_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) + myjitdriver = JitDriver(greens=[], reds=['y', 'x', 'res']) class A: def __init__(self, val): self.val = val @@ -253,7 +251,7 @@ myjitdriver.can_enter_jit(y=y, x=x, res=res) myjitdriver.jit_merge_point(y=y, x=x, res=res) res = res.binop(A(y)) - if y<7: + if y < 7: res = x x = A(1) y -= 1 @@ -267,7 +265,7 @@ assert res == g(6, 14) def test_both_virtual_and_field_variable(self): - myjitdriver = JitDriver(greens = [], reds = ['n']) + myjitdriver = JitDriver(greens=[], reds=['n']) class Foo(object): pass def f(n): @@ -286,7 +284,7 @@ self.check_resops(new_with_vtable=0, new=0) def test_immutable_constant_getfield(self): - myjitdriver = JitDriver(greens = ['stufflist'], reds = ['n', 'i']) + myjitdriver = JitDriver(greens=['stufflist'], reds=['n', 'i']) class Stuff(object): _immutable_ = True @@ -312,7 +310,7 @@ self.check_resops(getfield_gc=0) def test_escapes(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) + myjitdriver = JitDriver(greens=[], reds=['n', 
'parent']) class Parent(object): def __init__(self, node): @@ -341,7 +339,7 @@ self.check_resops(**{self._new_op: 1}) def test_virtual_on_virtual(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) + myjitdriver = JitDriver(greens=[], reds=['n', 'parent']) class Node(object): def __init__(self, f): @@ -371,7 +369,7 @@ self.check_resops(new=0, new_with_vtable=0) def test_bridge_from_interpreter(self): - mydriver = JitDriver(reds = ['n', 'f'], greens = []) + mydriver = JitDriver(reds=['n', 'f'], greens=[]) def f(n): f = self._new() @@ -397,7 +395,7 @@ self.check_enter_count(2) def test_new_virtual_member_in_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'sa', 'node']) def f(n): node = self._new() node.value = 1 @@ -406,19 +404,19 @@ while n > 0: myjitdriver.can_enter_jit(n=n, sa=sa, node=node) myjitdriver.jit_merge_point(n=n, sa=sa, node=node) - if n&30 > 0: + if n & 30 > 0: sa += node.value next = self._new() next.value = n node = next - if n<10: + if n < 10: node.extra = sa n -= 1 return node.extra assert self.meta_interp(f, [20]) == f(20) def test_constant_virtual1(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'sa', 'node']) def f(n): node = self._new() node.value = 1 @@ -426,11 +424,11 @@ while n > 0: myjitdriver.can_enter_jit(n=n, sa=sa, node=node) myjitdriver.jit_merge_point(n=n, sa=sa, node=node) - if n>20: + if n > 20: next = self._new() next.value = 2 node = next - elif n>10: + elif n > 10: next = self._new() next.value = 3 node = next @@ -440,7 +438,7 @@ assert self.meta_interp(f, [30]) == f(30) def test_constant_virtual2(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'sa', 'node']) def f(n): node = self._new() node.value = 1 @@ -449,7 +447,7 @@ myjitdriver.can_enter_jit(n=n, sa=sa, node=node) myjitdriver.jit_merge_point(n=n, sa=sa, node=node) sa += node.value - if n&15 > 7: + if n & 15 > 7: next = self._new() next.value = 2 node = next @@ -465,7 +463,7 @@ class RefNode(object): def __init__(self, ref): self.ref = ref - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node1', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 'sa', 'node1', 'node2']) def f(n): node1 = self._new() node1.value = 1 @@ -474,7 +472,7 @@ while n > 0: myjitdriver.can_enter_jit(n=n, sa=sa, node1=node1, node2=node2) myjitdriver.jit_merge_point(n=n, sa=sa, node1=node1, node2=node2) - if n>10: + if n > 10: next = self._new() next.value = 2 node1 = next @@ -496,7 +494,7 @@ class RefNode(object): def __init__(self, ref): self.ref = ref - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node1', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 'sa', 'node1', 'node2']) def f(n): node1 = self._new() node1.value = 1 @@ -505,7 +503,7 @@ while n > 0: myjitdriver.can_enter_jit(n=n, sa=sa, node1=node1, node2=node2) myjitdriver.jit_merge_point(n=n, sa=sa, node1=node1, node2=node2) - if n>10: + if n > 10: next = self._new() next.value = node1.value + 2 node1 = next @@ -522,7 +520,7 @@ class RefNode(object): def __init__(self, ref): self.ref = ref - myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node1', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 'sa', 'node1', 'node2']) def f(n): node1 = self._new() node1.value = 1 @@ -533,7 +531,7 @@ myjitdriver.jit_merge_point(n=n, sa=sa, node1=node1, node2=node2) node2.ref.value += n 
sa += node1.value - if n>10: + if n > 10: next = self._new() next.value = node1.value + 1 node1 = next @@ -544,7 +542,7 @@ assert self.meta_interp(f, [20]) == f(20) def test_dual_counter(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node1', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 's', 'node1', 'node2']) def f(n, s): node1 = self._new() node1.value = 1 @@ -553,7 +551,7 @@ while n > 0: myjitdriver.can_enter_jit(n=n, s=s, node1=node1, node2=node2) myjitdriver.jit_merge_point(n=n, s=s, node1=node1, node2=node2) - if (n>>s) & 1: + if (n >> s) & 1: next = self._new() next.value = node1.value + 1 node1 = next @@ -570,7 +568,7 @@ self.check_resops(new=0, new_with_vtable=0) def test_single_virtual_forced_in_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 's', 'node']) def externfn(node): node.value *= 2 def f(n, s): @@ -582,7 +580,7 @@ next = self._new() next.value = node.value + 1 node = next - if (n>>s) & 1: + if (n >> s) & 1: externfn(node) n -= 1 return node.value @@ -592,7 +590,7 @@ assert res == f(40, 3) def test_forced_virtual_assigned_in_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 's', 'node', 'node2']) def externfn(node): node.value += 1 def f(n, s): @@ -606,7 +604,7 @@ next = self._new() next.value = node.value + 1 node = next - if (n>>s) & 1: + if (n >> s) & 1: node2.value += node.value node = node2 externfn(node) @@ -620,7 +618,7 @@ self.check_trace_count(3) def test_forced_virtual_assigned_different_class_in_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'node', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 's', 'node', 'node2']) def externfn(node): node.value += 1 class A(object): @@ -636,7 +634,7 @@ while n > 0: myjitdriver.can_enter_jit(n=n, s=s, node=node, node2=node2) myjitdriver.jit_merge_point(n=n, s=s, node=node, node2=node2) - if (n>>s) & 1: + if (n >> s) & 1: node2.value += node.value node = node2 else: @@ -659,7 +657,7 @@ assert res == g2(48, 3) def test_empty_virtual_with_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 's', 'sa', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 's', 'sa', 'node']) def f(n, s): node = self._new() sa = 0 @@ -668,7 +666,7 @@ myjitdriver.jit_merge_point(n=n, s=s, sa=sa, node=node) next = self._new() node = next - if (n>>s) & 1: + if (n >> s) & 1: sa += 1 else: sa += 2 @@ -680,13 +678,13 @@ assert res == f(40, 3) def test_virtual_array_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = [42, 42] while n > 0: myjitdriver.can_enter_jit(n=n, node=node) myjitdriver.jit_merge_point(n=n, node=node) - if (n>>3) & 1: + if (n >> 3) & 1: node = [node[0], node[1] + n] else: node = [node[0] + n, node[1]] @@ -695,13 +693,13 @@ assert self.meta_interp(f, [40]) == f(40) def test_virtual_array_different_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node']) def f(n): node = [42, 42] while n > 0: myjitdriver.can_enter_jit(n=n, node=node) myjitdriver.jit_merge_point(n=n, node=node) - if (n>>3) & 1: + if (n >> 3) & 1: node = [node[0], node[1] + n] else: node = [node[0] + n, node[-1], node[0] + node[1]] @@ -710,7 +708,7 @@ assert self.meta_interp(f, [40]) == f(40) def FIXME_why_does_this_force(self): - mydriver = JitDriver(reds 
= ['i', 'j'], greens = []) + mydriver = JitDriver(reds=['i', 'j'], greens=[]) def f(): i = self._new() i.value = 0 @@ -726,7 +724,7 @@ assert self.meta_interp(f, []) == 20 def FIXME_why_does_this_force2(self): - mydriver = JitDriver(reds = ['i', 'j'], greens = []) + mydriver = JitDriver(reds=['i', 'j'], greens=[]) def f(): i = self._new() i.value = 0 @@ -746,7 +744,7 @@ assert self.meta_interp(f, []) == 20 def test_virtual_skipped_by_bridge(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'm', 'i', 'x']) + myjitdriver = JitDriver(greens=[], reds=['n', 'm', 'i', 'x']) def f(n, m): x = self._new() x.value = 0 @@ -754,7 +752,7 @@ while i < n: myjitdriver.can_enter_jit(n=n, m=m, i=i, x=x) myjitdriver.jit_merge_point(n=n, m=m, i=i, x=x) - if i&m != m: + if i & m != m: newx = self._new() newx.value = x.value + i x = newx @@ -764,7 +762,7 @@ assert res == f(0x1F, 0x11) def test_duplicated_virtual(self): - myjitdriver = JitDriver(greens = [], reds = ['n', 'node1', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 'node1', 'node2']) def f(n): node1 = self._new() node1.value = 0 @@ -782,13 +780,11 @@ assert res == f(10) self.check_resops(new_with_vtable=0, new=0) - - def test_retrace_not_matching_bridge(self): @dont_look_inside def external(node): return node.value + 1 - myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2']) + myjitdriver = JitDriver(greens=[], reds=['n', 'i', 'node', 'node2']) class A(): def new(self): return A() @@ -825,7 +821,7 @@ @dont_look_inside def external(node): return node.value + 1 - myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'node', 'node2', 's']) + myjitdriver = JitDriver(greens=[], reds=['n', 'i', 'node', 'node2', 's']) class A(): def new(self): return A() @@ -866,7 +862,7 @@ bytecode = "iajb+JI" def get_printable_location(i): return "%d: %s" % (i, bytecode[i]) - myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j'], + myjitdriver = JitDriver(greens=['pc'], reds=['n', 'sa', 'i', 'j'], get_printable_location=get_printable_location) def f(n): pc = sa = 0 @@ -907,7 +903,7 @@ class Int(object): def __init__(self, val): self.val = val - myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'sa', 'i', 'j']) + myjitdriver = JitDriver(greens=['pc'], reds=['n', 'sa', 'i', 'j']) bytecode = "iajb+JI" def f(n): pc = sa = 0 @@ -947,10 +943,10 @@ self.check_aborted_count(0) self.check_target_token_count(3) + class VirtualMiscTests: - def test_multiple_equal_virtuals(self): - mydriver = JitDriver(reds = ['i'], greens = []) + mydriver = JitDriver(reds=['i'], greens=[]) class A: pass def f(): @@ -971,7 +967,7 @@ class A(object): def __init__(self, x): self.x = x - mydriver = JitDriver(reds = ['n'], greens = []) + mydriver = JitDriver(reds=['n'], greens=[]) global_a = A(0) def g(a): @@ -996,7 +992,7 @@ class A(object): def __init__(self, x): self.x = x - mydriver = JitDriver(reds = ['n', 'tot'], greens = []) + mydriver = JitDriver(reds=['n', 'tot'], greens=[]) def f(n): tot = 0 @@ -1017,7 +1013,7 @@ assert r == expected def test_arraycopy_disappears(self): - mydriver = JitDriver(reds = ['i'], greens = []) + mydriver = JitDriver(reds=['i'], greens=[]) def f(): i = 0 while i < 10: @@ -1033,7 +1029,7 @@ self.check_resops(new_array=0) def test_virtual_streq_bug(self): - mydriver = JitDriver(reds = ['i', 's', 'a'], greens = []) + mydriver = JitDriver(reds=['i', 's', 'a'], greens=[]) class A(object): def __init__(self, state): @@ -1060,7 +1056,7 @@ assert res == f() def test_getfield_gc_pure_nobug(self): - mydriver = 
JitDriver(reds = ['i', 's', 'a'], greens = []) + mydriver = JitDriver(reds=['i', 's', 'a'], greens=[]) class A(object): _immutable_fields_ = ['foo'] @@ -1088,7 +1084,7 @@ assert res == f() def test_virtual_attribute_pure_function(self): - mydriver = JitDriver(reds = ['i', 'sa', 'n', 'node'], greens = []) + mydriver = JitDriver(reds=['i', 'sa', 'n', 'node'], greens=[]) class A(object): def __init__(self, v1, v2): self.v1 = v1 @@ -1110,7 +1106,7 @@ assert res == f(16) def test_virtual_loop_invariant_getitem(self): - mydriver = JitDriver(reds = ['i', 'sa', 'n', 'node1', 'node2'], greens = []) + mydriver = JitDriver(reds=['i', 'sa', 'n', 'node1', 'node2'], greens=[]) class A(object): def __init__(self, v1, v2): self.v1 = v1 @@ -1134,7 +1130,7 @@ self.check_resops(getfield_gc=7) def test_raw_malloc(self): - mydriver = JitDriver(greens=[], reds = 'auto') + mydriver = JitDriver(greens=[], reds='auto') def f(n): i = 0 res = 0 @@ -1153,7 +1149,7 @@ self.check_resops({'guard_true': 2, 'int_add': 4, 'int_lt': 2, 'jump': 1}) def test_raw_malloc_resume(self): - mydriver = JitDriver(greens=[], reds = 'auto') + mydriver = JitDriver(greens=[], reds='auto') def f(n): i = 0 res = 0 @@ -1177,7 +1173,7 @@ 'jump': 2}) def test_raw_malloc_no_virtualstate(self): - mydriver = JitDriver(greens=[], reds = 'auto') + mydriver = JitDriver(greens=[], reds='auto') def f(n): res = 0 buffer = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') @@ -1199,7 +1195,7 @@ self.check_resops(getarrayitem_raw=2, raw_store=2) def test_raw_malloc_only_chars(self): - mydriver = JitDriver(greens=[], reds = 'auto') + mydriver = JitDriver(greens=[], reds='auto') def f(n): i = 0 res = 0 @@ -1218,7 +1214,6 @@ self.check_trace_count(1) self.check_resops(setarrayitem_raw=2, getarrayitem_raw=4) - # ____________________________________________________________ # Run 1: all the tests instantiate a real RPython class @@ -1241,12 +1236,12 @@ class Subclass(MyClass): pass - myjitdriver = JitDriver(greens = [], reds = ['n', 'res']) + myjitdriver = JitDriver(greens=[], reds=['n', 'res']) def f(n): res = 0 node = MyClass() - node.value = n # so that the annotator doesn't think that value is constant - node.value2 = n # ditto + node.value = n # so that the annotator doesn't think that value is constant + node.value2 = n # ditto while n > 0: myjitdriver.can_enter_jit(n=n, res=res) myjitdriver.jit_merge_point(n=n, res=res) @@ -1277,7 +1272,6 @@ def _new(): return lltype.malloc(NODE) - # ____________________________________________________________ # Run 3: all the tests use lltype.malloc to make a NODE2 # (same as Run 2 but it is part of the OBJECT hierarchy) From noreply at buildbot.pypy.org Mon Mar 17 23:19:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 17 Mar 2014 23:19:04 +0100 (CET) Subject: [pypy-commit] pypy default: enable these two tests Message-ID: <20140317221904.95E8E1C12F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70035:049d8d5677f9 Date: 2014-03-17 18:18 -0400 http://bitbucket.org/pypy/pypy/changeset/049d8d5677f9/ Log: enable these two tests diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -707,7 +707,7 @@ return node[0] + node[1] assert self.meta_interp(f, [40]) == f(40) - def FIXME_why_does_this_force(self): + def test_this_doesnt_force1(self): mydriver = JitDriver(reds=['i', 'j'], greens=[]) def f(): i = self._new() @@ -722,8 +722,9 @@ j = nxt 
return i.value + j.value assert self.meta_interp(f, []) == 20 + self.check_resops(new_with_vtable=0, new=0) - def FIXME_why_does_this_force2(self): + def test_this_doesnt_force2(self): mydriver = JitDriver(reds=['i', 'j'], greens=[]) def f(): i = self._new() @@ -742,6 +743,7 @@ i = j return i.value + j.value assert self.meta_interp(f, []) == 20 + self.check_resops(new_with_vtable=0, new=0) def test_virtual_skipped_by_bridge(self): myjitdriver = JitDriver(greens=[], reds=['n', 'm', 'i', 'x']) From noreply at buildbot.pypy.org Tue Mar 18 02:35:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 02:35:31 +0100 (CET) Subject: [pypy-commit] pypy default: win32 doesn't have unistd.h Message-ID: <20140318013531.538E61C3273@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70036:3cec52588761 Date: 2014-03-17 18:17 -0700 http://bitbucket.org/pypy/pypy/changeset/3cec52588761/ Log: win32 doesn't have unistd.h diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -11,7 +11,10 @@ from rpython.rlib import rposix from rpython.rlib.rstring import StringBuilder -eci = ExternalCompilationInfo(includes=['stdio.h', 'unistd.h', 'sys/types.h']) +includes = ['stdio.h', 'sys/types.h'] +if os.name == "posix": + includes += ['unistd.h'] +eci = ExternalCompilationInfo(includes=includes) def llexternal(*args, **kwargs): return rffi.llexternal(*args, compilation_info=eci, **kwargs) From noreply at buildbot.pypy.org Tue Mar 18 02:36:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 02:36:06 +0100 (CET) Subject: [pypy-commit] pypy default: skip this test on older cpythons Message-ID: <20140318013606.B34DF1C3273@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70037:67fa0a7737b8 Date: 2014-03-17 21:34 -0400 http://bitbucket.org/pypy/pypy/changeset/67fa0a7737b8/ Log: skip this test on older cpythons diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -200,6 +200,10 @@ assert f.closed == True def test_repr(self): + import sys + if '__pypy__' not in sys.builtin_module_names and \ + sys.version_info < (2, 7, 4): + skip("see cpython issue14161") assert repr(self.file).startswith( " Author: Brian Kearns Branch: Changeset: r70038:51832197b83d Date: 2014-03-17 18:44 -0700 http://bitbucket.org/pypy/pypy/changeset/51832197b83d/ Log: fix test_stat_exception on win32 diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -178,17 +178,9 @@ import sys import errno for fn in [self.posix.stat, self.posix.lstat]: - try: - fn("nonexistentdir/nonexistentfile") - except OSError, e: - assert e.errno == errno.ENOENT - assert e.filename == "nonexistentdir/nonexistentfile" - # On Windows, when the parent directory does not exist, - # the winerror is 3 (cannot find the path specified) - # instead of 2 (cannot find the file specified) - if sys.platform == 'win32': - assert isinstance(e, WindowsError) - assert e.winerror == 3 + exc = raises(OSError, fn, "nonexistentdir/nonexistentfile") + assert exc.value.errno == errno.ENOENT + assert exc.value.filename == "nonexistentdir/nonexistentfile" if hasattr(__import__(os.name), "statvfs"): def test_statvfs(self): From noreply at buildbot.pypy.org Tue Mar 18 02:59:04 2014 From: 
noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 02:59:04 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140318015904.4EE4A1C357E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70039:05ec203821cc Date: 2014-03-17 21:57 -0400 http://bitbucket.org/pypy/pypy/changeset/05ec203821cc/ Log: cleanup diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -397,10 +397,10 @@ includes = ['sys/time.h'] else: includes = ['time.h'] + eci = ExternalCompilationInfo(includes=includes) + class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes=includes - ) + _compilation_info_ = eci HAVE_UTIMES = platform.Has('utimes') config = platform.configure(CConfig) @@ -409,21 +409,14 @@ if config['HAVE_UTIMES']: class CConfig: - if not _WIN32: - _compilation_info_ = ExternalCompilationInfo( - includes = includes - ) - else: - _compilation_info_ = ExternalCompilationInfo( - includes = ['time.h'] - ) + _compilation_info_ = eci TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.LONG), ('tv_usec', rffi.LONG)]) config = platform.configure(CConfig) TIMEVAL = config['TIMEVAL'] TIMEVAL2P = rffi.CArrayPtr(TIMEVAL) os_utimes = self.llexternal('utimes', [rffi.CCHARP, TIMEVAL2P], - rffi.INT, compilation_info=CConfig._compilation_info_) + rffi.INT, compilation_info=eci) def os_utime_platform(path, actime, modtime): import math From noreply at buildbot.pypy.org Tue Mar 18 03:09:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 03:09:01 +0100 (CET) Subject: [pypy-commit] pypy default: skip pwd test_ztranslation on non-posix Message-ID: <20140318020901.2D5A01C35F0@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70040:34c8e179633d Date: 2014-03-17 19:08 -0700 http://bitbucket.org/pypy/pypy/changeset/34c8e179633d/ Log: skip pwd test_ztranslation on non-posix diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -1,8 +1,8 @@ import os -import py +import pytest if os.name != 'posix': - py.test.skip('pwd module only available on unix') + pytest.skip('pwd module only available on unix') class AppTestPwd: spaceconfig = dict(usemodules=['pwd']) diff --git a/pypy/module/pwd/test/test_ztranslation.py b/pypy/module/pwd/test/test_ztranslation.py --- a/pypy/module/pwd/test/test_ztranslation.py +++ b/pypy/module/pwd/test/test_ztranslation.py @@ -1,5 +1,9 @@ +import os +import pytest from pypy.objspace.fake.checkmodule import checkmodule +if os.name != 'posix': + pytest.skip('pwd module only available on unix') def test_checkmodule(): checkmodule('pwd') From noreply at buildbot.pypy.org Tue Mar 18 07:01:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 07:01:01 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140318060101.BD0E41C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70041:16c7d08815c0 Date: 2014-03-18 01:58 -0400 http://bitbucket.org/pypy/pypy/changeset/16c7d08815c0/ Log: cleanup diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -247,12 +247,10 @@ fromlist_w = None rel_modulename = None - if (level != 0 and - w_globals is not None and - space.isinstance_w(w_globals, space.w_dict)): - - rel_modulename, rel_level = 
_get_relative_name(space, modulename, level, w_globals) - + if (level != 0 and w_globals is not None and + space.isinstance_w(w_globals, space.w_dict)): + rel_modulename, rel_level = _get_relative_name(space, modulename, level, + w_globals) if rel_modulename: # if no level was set, ignore import errors, and # fall back to absolute import at the end of the @@ -601,7 +599,7 @@ try: if find_info.modtype == PY_SOURCE: load_source_module( - space, w_modulename, w_mod, + space, w_modulename, w_mod, find_info.filename, find_info.stream.readall(), find_info.stream.try_to_find_file_descriptor()) return w_mod diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -174,7 +174,6 @@ import notapackage import warnings - warnings.simplefilter('error', ImportWarning) try: raises(ImportWarning, imp) @@ -412,7 +411,7 @@ def test_future_relative_import_level_1(self): from pkg import relative_c assert relative_c.inpackage == 1 - + def test_future_relative_import_level_2(self): from pkg.pkg1 import relative_d assert relative_d.inpackage == 1 @@ -670,10 +669,7 @@ import imp import pkg import os - - info = ('.py', 'r', imp.PY_SOURCE) pathname = os.path.join(os.path.dirname(pkg.__file__), 'a.py') - module = imp.load_module('a', open(pathname), 'invalid_path_name', ('.py', 'r', imp.PY_SOURCE)) assert module.__name__ == 'a' @@ -1128,7 +1124,7 @@ def load_module(self, name): sys.modules[name] = sys return sys - + def importer_for_path(path): if path == "xxx": return Importer() diff --git a/pypy/module/marshal/__init__.py b/pypy/module/marshal/__init__.py --- a/pypy/module/marshal/__init__.py +++ b/pypy/module/marshal/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } - + interpleveldefs = { 'dump' : 'interp_marshal.dump', 'dumps' : 'interp_marshal.dumps', From noreply at buildbot.pypy.org Tue Mar 18 07:41:56 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 18 Mar 2014 07:41:56 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: fix Message-ID: <20140318064156.1F4651C00B9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70042:06f75ff60246 Date: 2014-03-18 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/06f75ff60246/ Log: fix diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1855,7 +1855,6 @@ extradescr = self.cpu.fielddescrof(op.args[1].concretetype.TO, 'entries') return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, - EffectInfo.EF_CAN_RAISE, extradescr=extradescr) def _handle_rgc_call(self, op, oopspec_name, args): From noreply at buildbot.pypy.org Tue Mar 18 08:07:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 08:07:58 +0100 (CET) Subject: [pypy-commit] pypy default: only look in sys.modules if force_init=False Message-ID: <20140318070758.9130F1C0362@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70043:310dcc241b1f Date: 2014-03-18 03:01 -0400 http://bitbucket.org/pypy/pypy/changeset/310dcc241b1f/ Log: only look in sys.modules if force_init=False diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -435,14 +435,12 @@ def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = 
self.sys.get('modules') - try: - w_mod = self.getitem(w_modules, w_name) - except OperationError, e: - if not e.match(self, self.w_KeyError): - raise - else: - if not force_init: - return w_mod + if not force_init: + try: + return self.getitem(w_modules, w_name) + except OperationError, e: + if not e.match(self, self.w_KeyError): + raise # If the module is a builtin but not yet imported, # retrieve it and initialize it @@ -453,13 +451,13 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # Add the module to sys.modules - self.setitem(w_modules, w_name, w_mod) - # And initialize it from pypy.interpreter.module import Module if isinstance(w_mod, Module): w_mod.init(self) + + # Add the module to sys.modules + self.setitem(w_modules, w_name, w_mod) return w_mod def get_builtinmodule_to_install(self): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -574,6 +574,7 @@ def load_module(space, w_modulename, find_info, reuse=False): if find_info is None: return + if find_info.w_loader: return space.call_method(find_info.w_loader, "load_module", w_modulename) From noreply at buildbot.pypy.org Tue Mar 18 08:35:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 08:35:08 +0100 (CET) Subject: [pypy-commit] pypy default: fix reimport/reload of builtin modules (issue1514) Message-ID: <20140318073508.F1CCB1D2540@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70044:0ecfb7242213 Date: 2014-03-18 03:33 -0400 http://bitbucket.org/pypy/pypy/changeset/0ecfb7242213/ Log: fix reimport/reload of builtin modules (issue1514) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -432,10 +432,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -454,6 +455,8 @@ # And initialize it from pypy.interpreter.module import Module if isinstance(w_mod, Module): + if not reuse: + w_mod = type(w_mod)(self, w_name) w_mod.init(self) # Add the module to sys.modules diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,7 +579,7 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,7 +203,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -223,7 +222,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git 
a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,7 +578,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -586,7 +585,6 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org Tue Mar 18 08:40:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 08:40:21 +0100 (CET) Subject: [pypy-commit] stmgc gc-small-uniform: A branch to redo small uniform allocations Message-ID: <20140318074021.16A321C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1059:f4dcc59b09e6 Date: 2014-03-17 11:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/f4dcc59b09e6/ Log: A branch to redo small uniform allocations From noreply at buildbot.pypy.org Tue Mar 18 08:40:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 08:40:22 +0100 (CET) Subject: [pypy-commit] stmgc gc-small-uniform: Get started Message-ID: <20140318074022.422A01C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1060:f1e60e1cb9cf Date: 2014-03-17 11:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/f1e60e1cb9cf/ Log: Get started diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -83,7 +83,7 @@ (outside the nursery), then it fits into one page. This is the common case. Otherwise, we need to compute it based on its location and size. */ - if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { + if (is_small_uniform(obj)) { page_privatize(first_page); } else { @@ -272,16 +272,16 @@ assert(_has_mutex_pages()); assert(!_is_young(obj)); - char *segment_base = get_segment_base(source_segment_num); uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(segment_base, obj); - if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + if (is_small_uniform(obj)) { abort();//XXX WRITE THE FAST CASE } else { + char *segment_base = get_segment_base(source_segment_num); + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(segment_base, obj); ssize_t obj_size = stmcb_size_rounded_up(realobj); assert(obj_size >= 16); uintptr_t end = start + obj_size; @@ -334,7 +334,7 @@ uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; - if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { + if (is_small_uniform(obj)) { abort();//XXX WRITE THE FAST CASE } else { @@ -488,6 +488,9 @@ /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; + /* Note that the overflow number cannot be entirely 1 bits; + this prevents stm_flags from ever containing the value -1, + which might be confused with GCWORD_MOVED. */ assert(highest_overflow_number != /* XXX else, overflow! */ (uint32_t)-GCFLAG_OVERFLOW_NUMBER_bit0); STM_PSEGMENT->overflow_number = highest_overflow_number; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -41,18 +41,13 @@ */ GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, - /* This flag is set by gcpage.c for all objects living in - uniformly-sized pages of small objects. 
- */ - GCFLAG_SMALL_UNIFORM = 0x02, - /* The following flag is set on nursery objects of which we asked the id or the identityhash. It means that a space of the size of the object has already been allocated in the nonmovable part. The same flag is abused to mark prebuilt objects whose hash has been taken during translation and is statically recorded just after the object. */ - GCFLAG_HAS_SHADOW = 0x04, + GCFLAG_HAS_SHADOW = 0x2, /* All remaining bits of the 32-bit 'stm_flags' field are taken by the "overflow number". This is a number that identifies the @@ -61,7 +56,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. */ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x8 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x4 /* must be last */ }; @@ -157,6 +152,9 @@ #ifndef NDEBUG pthread_t running_pthread; #endif + + /* This is for smallmalloc.c */ + struct small_malloc_data_s small_malloc_data; }; enum /* safe_point */ { diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -19,8 +19,6 @@ static void teardown_gcpage(void) { - memset(small_alloc, 0, sizeof(small_alloc)); - free_uniform_pages = NULL; LIST_FREE(testing_prebuilt_objs); if (tree_prebuilt_objs != NULL) { tree_free(tree_prebuilt_objs); @@ -29,52 +27,11 @@ } -#define GCPAGE_NUM_PAGES 20 - static void setup_N_pages(char *pages_addr, uint64_t num) { pages_initialize_shared((pages_addr - stm_object_pages) / 4096UL, num); } -static void grab_more_free_pages_for_small_allocations(void) -{ - /* grab N (= GCPAGE_NUM_PAGES) pages out of the top addresses */ - uintptr_t decrease_by = GCPAGE_NUM_PAGES * 4096; - if (uninitialized_page_stop - uninitialized_page_start <= decrease_by) - goto out_of_memory; - - uninitialized_page_stop -= decrease_by; - - if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - - uninitialized_page_start)) - goto out_of_memory; - - setup_N_pages(uninitialized_page_start, GCPAGE_NUM_PAGES); - - char *p = uninitialized_page_start; - long i; - for (i = 0; i < 16; i++) { - *(char **)p = free_uniform_pages; - free_uniform_pages = p; - } - return; - - out_of_memory: - stm_fatalerror("out of memory!\n"); /* XXX */ -} - -static char *_allocate_small_slowpath(uint64_t size) -{ - /* not thread-safe! Use only when holding the mutex */ - assert(_has_mutex()); - - if (free_uniform_pages == NULL) - grab_more_free_pages_for_small_allocations(); - - abort();//... -} - - static char *allocate_outside_nursery_large(uint64_t size) { /* thread-safe: use the lock of pages.c to prevent any remapping @@ -195,14 +152,14 @@ static uintptr_t object_last_page(object_t *obj) { uintptr_t lastbyte; - struct object_s *realobj = - (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); - if (realobj->stm_flags & GCFLAG_SMALL_UNIFORM) { + if (is_small_uniform(obj)) { lastbyte = (uintptr_t)obj; } else { /* get the size of the object */ + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(stm_object_pages, obj); size_t obj_size = stmcb_size_rounded_up(realobj); /* that's the last byte within the object */ diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -1,19 +1,12 @@ -/* Outside the nursery, we are taking from the highest addresses - complete pages, one at a time, which uniformly contain objects of - size "8 * N" for some N in range(2, GC_N_SMALL_REQUESTS). We are - taking from the lowest addresses "large" objects, which are at least - 288 bytes long, allocated by largemalloc.c. 
The limit is the same - as used in PyPy's default GC. -*/ - -#define GC_N_SMALL_REQUESTS 36 - -/* More parameters fished directly from PyPy's default GC +/* Some parameters fished directly from PyPy's default GC XXX document me */ #define GC_MIN (NB_NURSERY_PAGES * 4096 * 8) #define GC_MAJOR_COLLECT 1.82 +/* Granularity when grabbing more unused pages: take 50 at a time */ +#define GCPAGE_NUM_PAGES 50 + /* re-share pages after major collections (1 or 0) */ #define RESHARE_PAGES 1 @@ -23,16 +16,6 @@ static char *uninitialized_page_stop; -struct small_alloc_s { - char *next_object; /* the next address we will return, or NULL */ - char *range_last; /* if equal to next_object: next_object starts with - a next pointer; if greater: last item of a - contiguous range of unallocated objs */ -}; - -static struct small_alloc_s small_alloc[GC_N_SMALL_REQUESTS]; -static char *free_uniform_pages; - static void setup_gcpage(void); static void teardown_gcpage(void); static char *allocate_outside_nursery_large(uint64_t size); @@ -40,29 +23,4 @@ static void major_collection_if_requested(void); static void major_collection_now_at_safe_point(void); static bool largemalloc_keep_object_at(char *data); /* for largemalloc.c */ - - -static char *_allocate_small_slowpath(uint64_t size); - -static inline char *allocate_outside_nursery_small(uint64_t size) -{ - uint64_t index = size / 8; - OPT_ASSERT(2 <= index); - OPT_ASSERT(index < GC_N_SMALL_REQUESTS); - - char *result = small_alloc[index].next_object; - if (result == NULL) - return _allocate_small_slowpath(size); - - char *following; - if (small_alloc[index].range_last == result) { - following = ((char **)result)[0]; - small_alloc[index].range_last = ((char **)result)[1]; - } - else { - following = result + size; - } - small_alloc[index].next_object = following; - - return result; -} +static void setup_N_pages(char *pages_addr, uint64_t num); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -111,6 +111,7 @@ teardown_sync(); teardown_gcpage(); teardown_nursery(); + teardown_smallmalloc(); teardown_pages(); } diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c new file mode 100644 --- /dev/null +++ b/c7/stm/smallmalloc.c @@ -0,0 +1,128 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static void teardown_smallmalloc(void) +{ + memset(small_page_lists, 0, sizeof(small_page_lists)); + assert(free_uniform_pages == NULL); + first_small_uniform_loc = (uintptr_t) -1; +} + +static void grab_more_free_pages_for_small_allocations(void) +{ + /* Grab GCPAGE_NUM_PAGES pages out of the top addresses. Use the + lock of pages.c to prevent any remapping from occurring under our + feet. 
+ */ + mutex_pages_lock(); + + if (free_uniform_pages == NULL) { + + uintptr_t decrease_by = GCPAGE_NUM_PAGES * 4096; + if (uninitialized_page_stop - uninitialized_page_start < decrease_by) + goto out_of_memory; + + uninitialized_page_stop -= decrease_by; + first_small_uniform_loc = (uintptr_t)uninitialized_page_stop; + + char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; + if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - base)) + goto out_of_memory; + + setup_N_pages(uninitialized_page_stop, GCPAGE_NUM_PAGES); + + char *p = uninitialized_page_stop; + long i; + for (i = 0; i < GCPAGE_NUM_PAGES; i++) { + ((struct small_page_list_s *)p)->nextpage = free_uniform_pages; + free_uniform_pages = (struct small_page_list_s *)p; + p += 4096; + } + } + + mutex_pages_unlock(); + return; + + out_of_memory: + stm_fatalerror("out of memory!\n"); /* XXX */ +} + +static char *_allocate_small_slowpath(uint64_t size) +{ + /* First try to grab the next page from the global 'small_page_list' + */ + long n = size / 8; + struct small_page_list_s *smallpage; + struct small_free_loc_s *TLPREFIX *fl = + &STM_PSEGMENT->small_malloc_data.loc_free[n]; + assert(*fl == NULL); + + retry: + smallpage = small_page_lists[n]; + if (smallpage != NULL) { + if (UNLIKELY(!__sync_bool_compare_and_swap(&small_page_lists[n], + smallpage, + smallpage->nextpage))) + goto retry; + + /* Succeeded: we have a page in 'smallpage' */ + *fl = smallpage->header.next; + return (char *)smallpage; + } + + /* There is no more page waiting. Maybe we can pick one from + free_uniform_pages. + */ + smallpage = free_uniform_pages; + if (smallpage != NULL) { + if (UNLIKELY(!__sync_bool_compare_and_swap(&free_uniform_pages, + smallpage, + smallpage->nextpage))) + goto retry; + + /* Succeeded: we have a page in 'smallpage', which is not + initialized so far, apart from the 'nextpage' field read + above. Initialize it. + */ + assert(!(((uintptr_t)smallpage) & 4095)); + struct small_free_loc_s *p, *following = NULL; + + /* Initialize all slots from the second one to the last one to + contain a chained list */ + uintptr_t i = size; + while (i <= 4096 - size) { + p = (struct small_free_loc_s *)(((char *)smallpage) + i); + p->next = following; + following = p; + i += size; + } + + /* The first slot is immediately returned */ + *fl = following; + return (char *)smallpage; + } + + /* Not a single free page left. Grab some more free pges and retry. */ + grab_more_free_pages_for_small_allocations(); + goto retry; +} + +__attribute__((always_inline)) +static inline char *allocate_outside_nursery_small(uint64_t size) +{ + OPT_ASSERT((size & 7) == 0); + OPT_ASSERT(16 <= size && size < 8 * GC_N_SMALL_REQUESTS); + + struct small_free_loc_s *TLPREFIX *fl = + &STM_PSEGMENT->small_malloc_data.loc_free[size / 8]; + + struct small_free_loc_s *result = *fl; + + if (UNLIKELY(result == NULL)) + return _allocate_small_slowpath(size); + + *fl = result->next; + return (char *)result; +} diff --git a/c7/stm/smallmalloc.h b/c7/stm/smallmalloc.h new file mode 100644 --- /dev/null +++ b/c7/stm/smallmalloc.h @@ -0,0 +1,65 @@ + +/* Outside the nursery, we are taking from the highest addresses + complete pages, one at a time, which uniformly contain objects of + size "8 * N" for some N in range(2, GC_N_SMALL_REQUESTS). We are + taking from the lowest addresses "large" objects, which are at least + 288 bytes long, allocated by largemalloc.c. The limit is the same + as used in PyPy's default GC. 
+*/ + +#define GC_N_SMALL_REQUESTS 36 + + +struct small_free_loc_s { + struct small_free_loc_s *next; +}; + +struct small_page_list_s { + /* A chained list of locations within the same page which are + free. */ + struct small_free_loc_s header; + + /* A chained list of all small pages containing objects of + a given small size, and that have at least one free object. */ + struct small_page_list_s *nextpage; + + /* This structure is only two words, so it always fits inside one + free slot inside the page. */ +}; + + +/* For every size from 16 bytes to 8*(GC_N_SMALL_REQUESTS-1), this is + a list of pages that contain objects of that size and have at least + one free location. Additionally, the item 0 in the following list + is a chained list of fully-free pages (which can be reused for a + different size than the one they originally contained). +*/ +static struct small_page_list_s *small_page_lists[GC_N_SMALL_REQUESTS]; + +#define free_uniform_pages (small_page_lists[0]) + + +/* For is_small_uniform(). */ +static uintptr_t first_small_uniform_loc = (uintptr_t) -1; + + +/* This is a definition for 'STM_PSEGMENT->small_malloc_data'. Each + segment grabs one page at a time from the global list, and then + requests for data are answered locally. +*/ +struct small_malloc_data_s { + struct small_free_loc_s *loc_free[GC_N_SMALL_REQUESTS]; +}; + + +/* Functions + */ +static inline char *allocate_outside_nursery_small(uint64_t size) + __attribute__((always_inline)); + +static char *_allocate_small_slowpath(uint64_t size); +static void teardown_smallmalloc(void); + +static inline bool is_small_uniform(object_t *obj) { + return ((uintptr_t)obj) >= first_small_uniform_loc; +} diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -2,6 +2,7 @@ #include "stmgc.h" #include "stm/atomic.h" #include "stm/list.h" +#include "stm/smallmalloc.h" #include "stm/core.h" #include "stm/pagecopy.h" #include "stm/pages.h" @@ -21,6 +22,7 @@ #include "stm/prebuilt.c" #include "stm/gcpage.c" #include "stm/largemalloc.c" +#include "stm/smallmalloc.c" #include "stm/nursery.c" #include "stm/sync.c" #include "stm/setup.c" From noreply at buildbot.pypy.org Tue Mar 18 08:40:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 08:40:23 +0100 (CET) Subject: [pypy-commit] stmgc gc-small-uniform: in-progress Message-ID: <20140318074023.6DD041C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1061:34fbe565894a Date: 2014-03-18 08:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/34fbe565894a/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -84,6 +84,7 @@ the common case. Otherwise, we need to compute it based on its location and size. */ if (is_small_uniform(obj)) { + abort(); page_privatize(first_page); } else { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -97,7 +97,7 @@ obj->stm_flags &= ~GCFLAG_HAS_SHADOW; realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size = stmcb_size_rounded_up((struct object_s *)realobj); - goto copy_large_object; + goto handle_large_object; } } /* We need to make a copy of this object. It goes either in @@ -107,25 +107,27 @@ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size = stmcb_size_rounded_up((struct object_s *)realobj); - if (1 /*size >= GC_N_SMALL_REQUESTS*8*/) { + if (size >= GC_N_SMALL_REQUESTS) { /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. 
*/ char *allocated = allocate_outside_nursery_large(size); nobj = (object_t *)(allocated - stm_object_pages); - /* Copy the object */ - copy_large_object:; - char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); - memcpy(realnobj, realobj, size); - + handle_large_object: nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; } else { /* case "small enough" */ - abort(); //... + char *allocated = allocate_outside_nursery_small(size); + nobj = (object_t *)(allocated - stm_object_pages); + nobj_sync_now = (uintptr_t)nobj; } + /* Copy the object */ + char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); + memcpy(realnobj, realobj, size); + /* Done copying the object. */ //dprintf(("\t\t\t\t\t%p -> %p\n", obj, nobj)); pforwarded_array[0] = GCWORD_MOVED; @@ -153,6 +155,7 @@ /* Must trace the object later */ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); + assert(nobj_sync_now == ((uintptr_t)nobj | is_small_uniform(nobj))); } static void collect_roots_in_nursery(void) diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c --- a/c7/stm/smallmalloc.c +++ b/c7/stm/smallmalloc.c @@ -25,7 +25,7 @@ goto out_of_memory; uninitialized_page_stop -= decrease_by; - first_small_uniform_loc = (uintptr_t)uninitialized_page_stop; + first_small_uniform_loc = uninitialized_page_stop - stm_object_pages; char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - base)) @@ -51,8 +51,6 @@ static char *_allocate_small_slowpath(uint64_t size) { - /* First try to grab the next page from the global 'small_page_list' - */ long n = size / 8; struct small_page_list_s *smallpage; struct small_free_loc_s *TLPREFIX *fl = @@ -60,6 +58,8 @@ assert(*fl == NULL); retry: + /* First try to grab the next page from the global 'small_page_list' + */ smallpage = small_page_lists[n]; if (smallpage != NULL) { if (UNLIKELY(!__sync_bool_compare_and_swap(&small_page_lists[n], @@ -72,8 +72,8 @@ return (char *)smallpage; } - /* There is no more page waiting. Maybe we can pick one from - free_uniform_pages. + /* There is no more page waiting for the correct size of objects. + Maybe we can pick one from free_uniform_pages. */ smallpage = free_uniform_pages; if (smallpage != NULL) { @@ -104,7 +104,8 @@ return (char *)smallpage; } - /* Not a single free page left. Grab some more free pges and retry. */ + /* Not a single free page left. Grab some more free pages and retry. 
+ */ grab_more_free_pages_for_small_allocations(); goto retry; } From noreply at buildbot.pypy.org Tue Mar 18 08:49:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 08:49:01 +0100 (CET) Subject: [pypy-commit] pypy default: Backed out changeset 0ecfb7242213 Message-ID: <20140318074901.E4A6A1D26D4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70045:7c6999457664 Date: 2014-03-18 03:48 -0400 http://bitbucket.org/pypy/pypy/changeset/7c6999457664/ Log: Backed out changeset 0ecfb7242213 diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -432,11 +432,10 @@ return name - def getbuiltinmodule(self, name, force_init=False, reuse=True): + def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: - assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -455,8 +454,6 @@ # And initialize it from pypy.interpreter.module import Module if isinstance(w_mod, Module): - if not reuse: - w_mod = type(w_mod)(self, w_name) w_mod.init(self) # Add the module to sys.modules diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,7 +579,7 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True, reuse=reuse) + return space.getbuiltinmodule(find_info.filename, force_init=True) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,6 +203,7 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 + skip("fix me") import sys, marshal old = marshal.loads @@ -222,6 +223,7 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,6 +578,7 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): + skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -585,6 +586,7 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): + skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org Tue Mar 18 08:49:29 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 18 Mar 2014 08:49:29 +0100 (CET) Subject: [pypy-commit] pypy elidable-canfold-exception: Start a branch to constant-fold some errors in @elidable. Begin with Message-ID: <20140318074929.CA2CB1D26D4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: elidable-canfold-exception Changeset: r70046:83468ba5580d Date: 2014-03-18 09:48 +0200 http://bitbucket.org/pypy/pypy/changeset/83468ba5580d/ Log: Start a branch to constant-fold some errors in @elidable. 
Begin with changing all @elidable to @elidable() diff --git a/pypy/interpreter/signature.py b/pypy/interpreter/signature.py --- a/pypy/interpreter/signature.py +++ b/pypy/interpreter/signature.py @@ -10,7 +10,7 @@ self.varargname = varargname self.kwargname = kwargname - @jit.elidable + @jit.elidable() def find_argname(self, name): try: return self.argnames.index(name) @@ -69,4 +69,4 @@ return self.varargname if i == 2: return self.kwargname - raise IndexError \ No newline at end of file + raise IndexError diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -135,7 +135,7 @@ msg = "list or tuple or dict" raise self._convert_error(msg, w_ob) - @jit.elidable + @jit.elidable() def _getcfield_const(self, attr): return self.fields_dict[attr] diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -100,7 +100,7 @@ def get_codec_from_cache(self, key): return self._get_codec_with_version(key, self.version) - @jit.elidable + @jit.elidable() def _get_codec_with_version(self, key, version): return self.codec_search_cache.get(key, None) diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -150,7 +150,7 @@ factor * float(self.ll_it), w_sublist) return space.wrap(w_se) - @jit.elidable + @jit.elidable() def _get_or_make_subentry(self, entry, make=True): try: return self.calls[entry] @@ -302,7 +302,7 @@ c_setup_profiling() space.getexecutioncontext().setllprofile(lsprof_call, space.wrap(self)) - @jit.elidable + @jit.elidable() def _get_or_make_entry(self, f_code, make=True): try: return self.data[f_code] @@ -313,7 +313,7 @@ return entry raise - @jit.elidable + @jit.elidable() def _get_or_make_builtin_entry(self, key, make=True): try: return self.builtin_data[key] diff --git a/pypy/module/_rawffi/alt/interp_ffitype.py b/pypy/module/_rawffi/alt/interp_ffitype.py --- a/pypy/module/_rawffi/alt/interp_ffitype.py +++ b/pypy/module/_rawffi/alt/interp_ffitype.py @@ -17,7 +17,7 @@ self.w_pointer_to = w_pointer_to self.set_ffitype(ffitype) - @jit.elidable + @jit.elidable() def get_ffitype(self): if not self._ffitype: raise ValueError("Operation not permitted on an incomplete type") diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -342,10 +342,10 @@ return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) def c_deallocate_function_args(space, cargs): call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) - at jit.elidable + at jit.elidable() def c_function_arg_sizeof(space): return _cdata_to_size_t(space, call_capi(space, 'function_arg_sizeof', [])) - at jit.elidable + at jit.elidable() def c_function_arg_typeoffset(space): return _cdata_to_size_t(space, call_capi(space, 'function_arg_typeoffset', [])) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -121,7 +121,7 @@ def check_sys_modules_w(space, modulename): return space.finditem_str(space.sys.get('modules'), modulename) - at jit.elidable + at jit.elidable() def _get_dot_position(str, n): # return the index in 
str of the '.' such that there are n '.'-separated # strings after it diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -43,7 +43,7 @@ else: return obj._mapdict_read_storage(attr.storageindex) - @jit.elidable + @jit.elidable() def _pure_mapdict_read_storage(self, obj, storageindex): return obj._mapdict_read_storage(storageindex) @@ -68,7 +68,7 @@ else: return self._find_map_attr_indirection(selector) - @jit.elidable + @jit.elidable() def _find_map_attr_jit_pure(self, name, index): return self._find_map_attr_indirection((name, index)) @@ -128,14 +128,14 @@ def set_terminator(self, obj, terminator): raise NotImplementedError("abstract base class") - @jit.elidable + @jit.elidable() def size_estimate(self): return self._size_estimate >> NUM_DIGITS def search(self, attrtype): return None - @jit.elidable + @jit.elidable() def _get_new_attr(self, name, index): selector = name, index cache = self.cache_attrs diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -585,7 +585,7 @@ return self return self._new(self.title(selfval)) - @jit.elidable + @jit.elidable() def title(self, value): builder = self._builder(len(value)) previous_is_cased = False diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -259,7 +259,7 @@ return w_value return w_value - @elidable + @elidable() def _pure_getdictvalue_no_unwrapping(w_self, space, version_tag, attr): return w_self._getdictvalue_no_unwrapping(space, attr) @@ -371,7 +371,7 @@ w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag) return w_class, unwrap_cell(space, w_value) - @elidable + @elidable() def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space cache = space.fromcache(MethodCache) diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -634,7 +634,7 @@ self.run('compile_framework_external_exception_handling') def define_compile_framework_bug1(self): - @elidable + @elidable() def nonmoving(): x = X(1) for i in range(7): diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -216,12 +216,12 @@ _ll_3_list_resize_hint_really = rlist_ll._ll_list_resize_hint_really - at elidable + at elidable() def _ll_1_gc_identityhash(x): return lltype.identityhash(x) -# the following function should not be "@elidable": I can think of +# the following function should not be "@elidable()": I can think of # a corner case in which id(const) is constant-folded, and then 'const' # disappears and is collected too early (possibly causing another object # with the same id() to appear). 
diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -122,7 +122,7 @@ class B(A): x = 2 - @jit.elidable + @jit.elidable() def g(cls): return cls() diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -52,7 +52,7 @@ assert not policy.look_inside_graph(graph) def test_elidable(): - @jit.elidable + @jit.elidable() def g(x): return x + 2 graph = support.getgraph(g, [5]) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -413,7 +413,7 @@ call=0, call_pure=0) def test_residual_call_elidable_1(self): - @elidable + @elidable() def externfn(x, y): return x * y def f(n): @@ -426,7 +426,7 @@ def test_residual_call_elidable_2(self): myjitdriver = JitDriver(greens = [], reds = ['n']) - @elidable + @elidable() def externfn(x): return x - 1 def f(n): @@ -443,7 +443,7 @@ def test_constfold_call_elidable(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - @elidable + @elidable() def externfn(x): return x - 3 def f(n, m): @@ -459,7 +459,7 @@ def test_constfold_call_elidable_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - @elidable + @elidable() def externfn(x): return x - 3 class V: @@ -484,7 +484,7 @@ self.x = x v1 = V(1) v2 = V(2) - @elidable + @elidable() def externfn(x): if x: return v1 @@ -504,7 +504,7 @@ def test_elidable_raising(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - @elidable + @elidable() def externfn(x): if x <= 0: raise ValueError @@ -530,7 +530,7 @@ def test_elidable_raising_2(self): myjitdriver = JitDriver(greens = ['m'], reds = ['n']) - @elidable + @elidable() def externfn(x): if x <= 0: raise ValueError diff --git a/rpython/jit/metainterp/test/test_call.py b/rpython/jit/metainterp/test/test_call.py --- a/rpython/jit/metainterp/test/test_call.py +++ b/rpython/jit/metainterp/test/test_call.py @@ -28,7 +28,7 @@ def test_call_elidable_none(self): d = {} - @jit.elidable + @jit.elidable() def f(a): return d.get(a, None) diff --git a/rpython/jit/metainterp/test/test_jitprof.py b/rpython/jit/metainterp/test/test_jitprof.py --- a/rpython/jit/metainterp/test/test_jitprof.py +++ b/rpython/jit/metainterp/test/test_jitprof.py @@ -77,7 +77,7 @@ assert profiler.calls == 1 def test_blackhole_pure(self): - @elidable + @elidable() def g(n): return n+1 diff --git a/rpython/jit/metainterp/test/test_send.py b/rpython/jit/metainterp/test/test_send.py --- a/rpython/jit/metainterp/test/test_send.py +++ b/rpython/jit/metainterp/test/test_send.py @@ -596,7 +596,7 @@ def test_constfold_pure_oosend(self): myjitdriver = JitDriver(greens=[], reds = ['i', 'obj']) class A: - @elidable + @elidable() def foo(self): return 42 def fn(n, i): diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -411,7 +411,7 @@ a1.y = 6 a2 = A() a2.y = 13 - @jit.elidable + @jit.elidable() def f(b): return b + 1 def fn(n): diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py --- a/rpython/jit/tl/tlc.py +++ b/rpython/jit/tl/tlc.py @@ -72,7 +72,7 @@ classes = [] # [(descr, cls), 
...] - @elidable + @elidable() def get(key): for descr, cls in Class.classes: if key.attributes == descr.attributes and\ diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1,4 +1,5 @@ import sys +import types import py @@ -11,7 +12,7 @@ DEBUG_ELIDABLE_FUNCTIONS = False -def elidable(func): +def elidable(canfolderror=None): """ Decorate a function as "trace-elidable". Usually this means simply that the function is constant-foldable, i.e. is pure and has no side-effects. @@ -31,22 +32,31 @@ Most importantly it doesn't mean that an elidable function has no observable side effect, but those side effects are idempotent (ie caching). If a particular call to this function ends up raising an exception, then it - is handled like a normal function call (this decorator is ignored). + is handled like a normal function call (this decorator is ignored), unless + canfolderror is specified (which is an exception class or a tuple). + In this case, if it raises specific exception listed there, it's also + constant folded away. """ - if DEBUG_ELIDABLE_FUNCTIONS: - cache = {} - oldfunc = func - def func(*args): - result = oldfunc(*args) # if it raises, no caching - try: - oldresult = cache.setdefault(args, result) - except TypeError: - pass # unhashable args - else: - assert oldresult == result - return result - func._elidable_function_ = True - return func + if isinstance(canfolderror, types.FunctionType): + raise Exception("@elidable was changed to a full " + "decorator, use @elidable()") + def decorator(func): + if DEBUG_ELIDABLE_FUNCTIONS: + cache = {} + oldfunc = func + def func(*args): + result = oldfunc(*args) # if it raises, no caching + try: + oldresult = cache.setdefault(args, result) + except TypeError: + pass # unhashable args + else: + assert oldresult == result + return result + func._elidable_function_ = True + if canfolderror is not None: + func._elidable_exceptions_ = canfolderror + return func def purefunction(*args, **kwargs): import warnings diff --git a/rpython/rlib/jit_libffi.py b/rpython/rlib/jit_libffi.py --- a/rpython/rlib/jit_libffi.py +++ b/rpython/rlib/jit_libffi.py @@ -245,7 +245,7 @@ del cls._import @staticmethod - @jit.elidable + @jit.elidable() def getkind(ffi_type): """Returns 'v' for void, 'f' for float, 'i' for signed integer, 'u' for unsigned integer, 'S' for singlefloat, 'L' for long long @@ -281,7 +281,7 @@ return '?' @staticmethod - @jit.elidable + @jit.elidable() def is_struct(ffi_type): return rffi.getintfield(ffi_type, 'c_type') == FFI_TYPE_STRUCT diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py --- a/rpython/rlib/libffi.py +++ b/rpython/rlib/libffi.py @@ -45,7 +45,7 @@ del cls._import @staticmethod - @jit.elidable + @jit.elidable() def getkind(ffi_type): """Returns 'v' for void, 'f' for float, 'i' for signed integer, and 'u' for unsigned integer. @@ -79,7 +79,7 @@ raise KeyError @staticmethod - @jit.elidable + @jit.elidable() def is_struct(ffi_type): return intmask(ffi_type.c_type) == FFI_TYPE_STRUCT diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -169,7 +169,7 @@ numdigits._always_inline_ = True @staticmethod - @jit.elidable + @jit.elidable() def fromint(intval): # This function is marked as pure, so you must not call it and # then modify the result. 
@@ -192,7 +192,7 @@ return rbigint([_store_digit(ival & MASK)], sign, 1) @staticmethod - @jit.elidable + @jit.elidable() def frombool(b): # You must not call this function and then modify the result. if b: @@ -205,7 +205,7 @@ return rbigint(*args_from_long(l)) @staticmethod - @jit.elidable + @jit.elidable() def fromfloat(dval): """ Create a new bigint object from a float """ # This function is not marked as pure because it can raise @@ -216,7 +216,7 @@ return rbigint._fromfloat_finite(dval) @staticmethod - @jit.elidable + @jit.elidable() def _fromfloat_finite(dval): sign = 1 if dval < 0.0: @@ -238,7 +238,7 @@ return v @staticmethod - @jit.elidable + @jit.elidable() @specialize.argtype(0) def fromrarith_int(i): # This function is marked as pure, so you must not call it and @@ -246,14 +246,14 @@ return rbigint(*args_from_rarith_int(i)) @staticmethod - @jit.elidable + @jit.elidable() def fromdecimalstr(s): # This function is marked as elidable, so you must not call it and # then modify the result. return _decimalstr_to_bigint(s) @staticmethod - @jit.elidable + @jit.elidable() def fromstr(s, base=0): """As string_to_int(), but ignores an optional 'l' or 'L' suffix and returns an rbigint.""" @@ -271,7 +271,7 @@ return parse_digit_string(parser) @staticmethod - @jit.elidable + @jit.elidable() def frombytes(s, byteorder, signed): if byteorder not in ('big', 'little'): raise InvalidEndiannessError() @@ -311,7 +311,7 @@ result._normalize() return result - @jit.elidable + @jit.elidable() def tobytes(self, nbytes, byteorder, signed): if byteorder not in ('big', 'little'): raise InvalidEndiannessError() @@ -384,7 +384,7 @@ digits = ''.join([digits[i] for i in range(length-1, -1, -1)]) return digits - @jit.elidable + @jit.elidable() def toint(self): """ Get an integer from a bigint object. @@ -400,7 +400,7 @@ raise OverflowError return intmask(intmask(x) * sign) - @jit.elidable + @jit.elidable() def tolonglong(self): return _AsLongLong(self) @@ -408,13 +408,13 @@ def tobool(self): return self.sign != 0 - @jit.elidable + @jit.elidable() def touint(self): if self.sign == -1: raise ValueError("cannot convert negative integer to unsigned int") return self._touint_helper() - @jit.elidable + @jit.elidable() def _touint_helper(self): x = r_uint(0) i = self.numdigits() - 1 @@ -427,32 +427,32 @@ i -= 1 return x - @jit.elidable + @jit.elidable() def toulonglong(self): if self.sign == -1: raise ValueError("cannot convert negative integer to unsigned int") return _AsULonglong_ignore_sign(self) - @jit.elidable + @jit.elidable() def uintmask(self): return _AsUInt_mask(self) - @jit.elidable + @jit.elidable() def ulonglongmask(self): """Return r_ulonglong(self), truncating.""" return _AsULonglong_mask(self) - @jit.elidable + @jit.elidable() def tofloat(self): return _AsDouble(self) - @jit.elidable + @jit.elidable() def format(self, digits, prefix='', suffix=''): # 'digits' is a string whose length is the base to use, # and where each character is the corresponding digit. 
return _format(self, digits, prefix, suffix) - @jit.elidable + @jit.elidable() def repr(self): try: x = self.toint() @@ -460,7 +460,7 @@ return self.format(BASE10, suffix="L") return str(x) + "L" - @jit.elidable + @jit.elidable() def str(self): try: x = self.toint() @@ -468,7 +468,7 @@ return self.format(BASE10) return str(x) - @jit.elidable + @jit.elidable() def eq(self, other): if (self.sign != other.sign or self.numdigits() != other.numdigits()): @@ -486,7 +486,7 @@ def ne(self, other): return not self.eq(other) - @jit.elidable + @jit.elidable() def lt(self, other): if self.sign > other.sign: return False @@ -533,11 +533,11 @@ def ge(self, other): return not self.lt(other) - @jit.elidable + @jit.elidable() def hash(self): return _hash(self) - @jit.elidable + @jit.elidable() def add(self, other): if self.sign == 0: return other @@ -550,7 +550,7 @@ result.sign *= other.sign return result - @jit.elidable + @jit.elidable() def sub(self, other): if other.sign == 0: return self @@ -563,7 +563,7 @@ result.sign *= self.sign return result - @jit.elidable + @jit.elidable() def mul(self, b): asize = self.numdigits() bsize = b.numdigits() @@ -608,12 +608,12 @@ result.sign = a.sign * b.sign return result - @jit.elidable + @jit.elidable() def truediv(self, other): div = _bigint_true_divide(self, other) return div - @jit.elidable + @jit.elidable() def floordiv(self, other): if self.sign == 1 and other.numdigits() == 1 and other.sign == 1: digit = other.digit(0) @@ -634,7 +634,7 @@ def div(self, other): return self.floordiv(other) - @jit.elidable + @jit.elidable() def mod(self, other): if self.sign == 0: return NULLRBIGINT @@ -671,7 +671,7 @@ mod = mod.add(other) return mod - @jit.elidable + @jit.elidable() def divmod(v, w): """ The / and % operators are now defined in terms of divmod(). @@ -697,7 +697,7 @@ div = div.sub(ONERBIGINT) return div, mod - @jit.elidable + @jit.elidable() def pow(a, b, c=None): negativeOutput = False # if x<0 return negative output @@ -837,17 +837,17 @@ z = z.sub(c) return z - @jit.elidable + @jit.elidable() def neg(self): return rbigint(self._digits, -self.sign, self.size) - @jit.elidable + @jit.elidable() def abs(self): if self.sign != -1: return self return rbigint(self._digits, 1, self.size) - @jit.elidable + @jit.elidable() def invert(self): #Implement ~x as -(x + 1) if self.sign == 0: return ONENEGATIVERBIGINT @@ -856,7 +856,7 @@ ret.sign = -ret.sign return ret - @jit.elidable + @jit.elidable() def lshift(self, int_other): if int_other < 0: raise ValueError("negative shift count") @@ -893,7 +893,7 @@ return z lshift._always_inline_ = True # It's so fast that it's always benefitial. - @jit.elidable + @jit.elidable() def lqshift(self, int_other): " A quicker one with much less checks, int_other is valid and for the most part constant." assert int_other > 0 @@ -913,7 +913,7 @@ return z lqshift._always_inline_ = True # It's so fast that it's always benefitial. - @jit.elidable + @jit.elidable() def rshift(self, int_other, dont_invert=False): if int_other < 0: raise ValueError("negative shift count") @@ -943,7 +943,7 @@ return z rshift._always_inline_ = 'try' # It's so fast that it's always benefitial. 
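As a side note, a small sketch (not taken from the patch) of what marking these rbigint methods elidable buys: when the operands are constants in a trace, the whole computation can be folded to its result at trace time.

    from rpython.rlib.rbigint import rbigint

    def folded_product():
        # fromint(), mul() and toint() are all elidable above, so for these
        # constant operands the JIT can replace the calls by the final value
        return rbigint.fromint(12345).mul(rbigint.fromint(67890)).toint()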
- @jit.elidable + @jit.elidable() def abs_rshift_and_mask(self, bigshiftcount, mask): assert isinstance(bigshiftcount, r_ulonglong) assert mask >= 0 @@ -992,30 +992,30 @@ z._normalize() return z - @jit.elidable + @jit.elidable() def and_(self, other): return _bitwise(self, '&', other) - @jit.elidable + @jit.elidable() def xor(self, other): return _bitwise(self, '^', other) - @jit.elidable + @jit.elidable() def or_(self, other): return _bitwise(self, '|', other) - @jit.elidable + @jit.elidable() def oct(self): if self.sign == 0: return '0L' else: return _format(self, BASE8, '0', 'L') - @jit.elidable + @jit.elidable() def hex(self): return _format(self, BASE16, '0x', 'L') - @jit.elidable + @jit.elidable() def log(self, base): # base is supposed to be positive or 0.0, which means we use e if base == 10.0: @@ -1050,7 +1050,7 @@ _normalize._always_inline_ = True - @jit.elidable + @jit.elidable() def bit_length(self): i = self.numdigits() if i == 1 and self._digits[0] == NULLDIGIT: diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -160,7 +160,7 @@ @specialize.argtype(0) - at jit.elidable + at jit.elidable() def replace(input, sub, by, maxsplit=-1): if isinstance(input, str): assert isinstance(sub, str) @@ -236,7 +236,7 @@ return start, end @specialize.argtype(0) - at jit.elidable + at jit.elidable() def startswith(u_self, prefix, start=0, end=sys.maxint): length = len(u_self) start, end = _normalize_start_end(length, start, end) @@ -249,7 +249,7 @@ return True @specialize.argtype(0) - at jit.elidable + at jit.elidable() def endswith(u_self, suffix, start=0, end=sys.maxint): length = len(u_self) start, end = _normalize_start_end(length, start, end) diff --git a/rpython/rtyper/lltypesystem/ll_str.py b/rpython/rtyper/lltypesystem/ll_str.py --- a/rpython/rtyper/lltypesystem/ll_str.py +++ b/rpython/rtyper/lltypesystem/ll_str.py @@ -10,7 +10,7 @@ else: return r_uint(i) - at jit.elidable + at jit.elidable() def ll_int2dec(val): from rpython.rtyper.lltypesystem.rstr import mallocstr @@ -44,7 +44,7 @@ for i in range(16): hex_chars[i] = "%x" % i - at jit.elidable + at jit.elidable() def ll_int2hex(i, addPrefix): from rpython.rtyper.lltypesystem.rstr import mallocstr temp = malloc(CHAR_ARRAY, 20) @@ -81,7 +81,7 @@ j += 1 return result - at jit.elidable + at jit.elidable() def ll_int2oct(i, addPrefix): from rpython.rtyper.lltypesystem.rstr import mallocstr if i == 0: diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -71,7 +71,7 @@ math_sin = llexternal('sin', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) math_cos = llexternal('cos', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True) - at jit.elidable + at jit.elidable() def sqrt_nonneg(x): return math_sqrt(x) sqrt_nonneg.oopspec = "math.sqrt_nonneg(x)" diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -199,7 +199,7 @@ self.ll = LLHelpers self.malloc = mallocunicode - @jit.elidable + @jit.elidable() def ll_str(self, s): # XXX crazy that this is here, but I don't want to break # rmodel logic @@ -214,14 +214,14 @@ result.chars[i] = cast_primitive(Char, c) return result - @jit.elidable + @jit.elidable() def ll_unicode(self, s): if s: return s else: return self.ll.ll_constant_unicode(u'None') - 
@jit.elidable + @jit.elidable() def ll_encode_latin1(self, s): length = len(s.chars) result = mallocstr(length) @@ -266,7 +266,7 @@ class LLHelpers(AbstractLLHelpers): from rpython.rtyper.annlowlevel import llstr, llunicode - @jit.elidable + @jit.elidable() def ll_str_mul(s, times): if times < 0: times = 0 @@ -288,7 +288,7 @@ i += j return newstr - @jit.elidable + @jit.elidable() def ll_char_mul(ch, times): if typeOf(ch) is Char: malloc = mallocstr @@ -343,7 +343,7 @@ b.chars[i] = str.chars[i] return b - @jit.elidable + @jit.elidable() def ll_strhash(s): # unlike CPython, there is no reason to avoid to return -1 # but our malloc initializes the memory to zero, so we use zero as the @@ -364,7 +364,7 @@ def ll_strfasthash(s): return s.hash # assumes that the hash is already computed - @jit.elidable + @jit.elidable() def ll_strconcat(s1, s2): len1 = s1.length() len2 = s2.length() @@ -384,7 +384,7 @@ return newstr ll_strconcat.oopspec = 'stroruni.concat(s1, s2)' - @jit.elidable + @jit.elidable() def ll_strip(s, ch, left, right): s_len = len(s.chars) if s_len == 0: @@ -404,7 +404,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result - @jit.elidable + @jit.elidable() def ll_strip_default(s, left, right): s_len = len(s.chars) if s_len == 0: @@ -424,7 +424,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result - @jit.elidable + @jit.elidable() def ll_strip_multiple(s, s2, left, right): s_len = len(s.chars) if s_len == 0: @@ -444,7 +444,7 @@ s.copy_contents(s, result, lpos, 0, r_len) return result - @jit.elidable + @jit.elidable() def ll_upper(s): s_chars = s.chars s_len = len(s_chars) @@ -458,7 +458,7 @@ i += 1 return result - @jit.elidable + @jit.elidable() def ll_lower(s): s_chars = s.chars s_len = len(s_chars) @@ -505,7 +505,7 @@ i += 1 return result - @jit.elidable + @jit.elidable() def ll_strcmp(s1, s2): if not s1 and not s2: return True @@ -528,7 +528,7 @@ i += 1 return len1 - len2 - @jit.elidable + @jit.elidable() def ll_streq(s1, s2): if s1 == s2: # also if both are NULLs return True @@ -548,7 +548,7 @@ return True ll_streq.oopspec = 'stroruni.equal(s1, s2)' - @jit.elidable + @jit.elidable() def ll_startswith(s1, s2): len1 = len(s1.chars) len2 = len(s2.chars) @@ -569,7 +569,7 @@ return False return s.chars[0] == ch - @jit.elidable + @jit.elidable() def ll_endswith(s1, s2): len1 = len(s1.chars) len2 = len(s2.chars) @@ -591,7 +591,7 @@ return False return s.chars[len(s.chars) - 1] == ch - @jit.elidable + @jit.elidable() @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find_char(s, ch, start, end): i = start @@ -603,7 +603,7 @@ i += 1 return -1 - @jit.elidable + @jit.elidable() def ll_rfind_char(s, ch, start, end): if end > len(s.chars): end = len(s.chars) @@ -614,7 +614,7 @@ return i return -1 - @jit.elidable + @jit.elidable() def ll_count_char(s, ch, start, end): count = 0 i = start @@ -676,7 +676,7 @@ res = 0 return res - @jit.elidable + @jit.elidable() def ll_search(s1, s2, start, end, mode): count = 0 n = end - start @@ -818,7 +818,7 @@ @jit.oopspec('stroruni.slice(s1, start, stop)') @signature(types.any(), types.int(), types.int(), returns=types.any()) - @jit.elidable + @jit.elidable() def _ll_stringslice(s1, start, stop): lgt = stop - start assert start >= 0 @@ -980,7 +980,7 @@ item.copy_contents(s, item, 0, 0, prev_pos) return res - @jit.elidable + @jit.elidable() def ll_replace_chr_chr(s, c1, c2): length = len(s.chars) newstr = s.malloc(length) @@ -995,7 +995,7 @@ j += 1 return newstr - @jit.elidable + @jit.elidable() def 
ll_contains(s, c): chars = s.chars strlen = len(chars) @@ -1006,7 +1006,7 @@ i += 1 return False - @jit.elidable + @jit.elidable() def ll_int(s, base): if not 2 <= base <= 36: raise ValueError diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -136,7 +136,7 @@ hop.exception_cannot_occur() return vlist[0] - @jit.elidable + @jit.elidable() def ll_str(self, f): return llstr(formatd(f, 'f', 6)) diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -366,7 +366,7 @@ hop.exception_cannot_occur() return vlist[0] - @jit.elidable + @jit.elidable() def ll_str(self, i): from rpython.rtyper.lltypesystem.ll_str import ll_int2dec return ll_int2dec(i) diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -20,7 +20,7 @@ self.rstr_decode_utf_8 = func_with_new_name(str_decode_utf_8_impl, 'rstr_decode_utf_8_impl') - @jit.elidable + @jit.elidable() def ll_decode_utf8(self, llvalue): from rpython.rtyper.annlowlevel import hlstr value = hlstr(llvalue) @@ -70,7 +70,7 @@ def rtype_method_lower(self, hop): raise TypeError("Cannot do tolower on unicode string") - @jit.elidable + @jit.elidable() def ll_encode_utf8(self, ll_s): from rpython.rtyper.annlowlevel import hlunicode s = hlunicode(ll_s) diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -160,7 +160,7 @@ pdb_plus_show.start(tb) - at jit.elidable + at jit.elidable() def offset2lineno(c, stopat): tab = c.co_lnotab line = c.co_firstlineno From noreply at buildbot.pypy.org Tue Mar 18 09:32:49 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 18 Mar 2014 09:32:49 +0100 (CET) Subject: [pypy-commit] pypy elidable-canfold-exception: make the old @elidable still usable, with a warning Message-ID: <20140318083249.551191C0166@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: elidable-canfold-exception Changeset: r70047:4ce5abc5856b Date: 2014-03-18 09:32 +0100 http://bitbucket.org/pypy/pypy/changeset/4ce5abc5856b/ Log: make the old @elidable still usable, with a warning diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1,5 +1,6 @@ import sys import types +import warnings import py @@ -12,7 +13,7 @@ DEBUG_ELIDABLE_FUNCTIONS = False -def elidable(canfolderror=None): +def elidable(*args, **kwargs): """ Decorate a function as "trace-elidable". Usually this means simply that the function is constant-foldable, i.e. is pure and has no side-effects. @@ -37,9 +38,7 @@ In this case, if it raises specific exception listed there, it's also constant folded away. 
""" - if isinstance(canfolderror, types.FunctionType): - raise Exception("@elidable was changed to a full " - "decorator, use @elidable()") + def decorator(func): if DEBUG_ELIDABLE_FUNCTIONS: cache = {} @@ -58,6 +57,17 @@ func._elidable_exceptions_ = canfolderror return func + if len(args) == 1: + # non-ported + assert len(kwargs) == 0 + warnings.warn("@elidable is deprecated, use @elidable() instead", stacklevel=2) + canfolderror = None + return decorator(args[0]) + else: + assert len(args) == 0 + canfolderror = kwargs.get('canfolderror', None) + return decorator + def purefunction(*args, **kwargs): import warnings warnings.warn("purefunction is deprecated, use elidable instead", DeprecationWarning) @@ -133,7 +143,7 @@ function """ def decorator(func): - elidable(func) + elidable()(func) args = _get_args(func) argstring = ", ".join(args) code = ["def f(%s):\n" % (argstring, )] diff --git a/rpython/rlib/test/test_jit.py b/rpython/rlib/test/test_jit.py --- a/rpython/rlib/test/test_jit.py +++ b/rpython/rlib/test/test_jit.py @@ -3,7 +3,7 @@ from rpython.conftest import option from rpython.annotator.model import UnionError from rpython.rlib.jit import (hint, we_are_jitted, JitDriver, elidable_promote, - JitHintError, oopspec, isconstant, conditional_call) + JitHintError, oopspec, isconstant, conditional_call, elidable) from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem import lltype @@ -91,6 +91,27 @@ myjitdriver = JitDriver(greens=['n'], reds=[]) py.test.raises(JitHintError, fn, 100) +def test_elidable(): + import warnings + # deprecated version + if 1:#with warnings.catch_warnings(record=True) as w: + #assert not w + @elidable + def f(): + pass + assert f._elidable_function_ + #assert "@elidable()" in w[0].message[0] + + # proper version + @elidable() + def f(): + pass + assert f._elidable_function_ + @elidable(canfolderror=ValueError) + def f(): + pass + assert f._elidable_function_ + class TestJIT(BaseRtypingTest): def test_hint(self): def f(): From noreply at buildbot.pypy.org Tue Mar 18 09:34:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 09:34:44 +0100 (CET) Subject: [pypy-commit] stmgc default: Add a README for the llvmfix directory Message-ID: <20140318083444.DD2591C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1062:57aa8f49c843 Date: 2014-03-18 09:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/57aa8f49c843/ Log: Add a README for the llvmfix directory diff --git a/c7/llvmfix/README.txt b/c7/llvmfix/README.txt new file mode 100644 --- /dev/null +++ b/c7/llvmfix/README.txt @@ -0,0 +1,24 @@ + +no-introduce-bogus-cast-in-combine.diff + + This is just fixes for a couple of bugs. + + +no-memset-creation-with-addrspace.diff + + This is a workaround for the fact that llvm.memset doesn't support + the address_space 256. It's a workaround, because it also prevents + some useful optimizations: for example replacing "x->a = 0; x->b = + 0;" with a single larger zeroing instruction. In other words, it + crashes only if an unpatched llvm introduce llvm.memset *and* this + memset remains as a real function call in the end. 
+
+
+addrspacecast-in-constant.diff
+
+    This is a workaround for (what we believe to be) clang producing
+    incorrectly the addrspacecast operation for this kind of code:
+
+      static __attribute__((address_space(256))) long a = 42;
+      struct s1 { void *a; };
+      struct s1 fofo = { (void *)(long)&a };

From noreply at buildbot.pypy.org Tue Mar 18 09:36:28 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 18 Mar 2014 09:36:28 +0100 (CET)
Subject: [pypy-commit] stmgc default: Change the example to not rely on globals in strange address spaces
Message-ID: <20140318083628.231C41C0166@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: 
Changeset: r1063:4574e37f7ae6
Date: 2014-03-18 09:34 +0100
http://bitbucket.org/pypy/stmgc/changeset/4574e37f7ae6/

Log: Change the example to not rely on globals in strange address spaces

diff --git a/c7/llvmfix/README.txt b/c7/llvmfix/README.txt
--- a/c7/llvmfix/README.txt
+++ b/c7/llvmfix/README.txt
@@ -19,6 +19,6 @@
     This is a workaround for (what we believe to be) clang producing
     incorrectly the addrspacecast operation for this kind of code:
 
-      static __attribute__((address_space(256))) long a = 42;
-      struct s1 { void *a; };
-      struct s1 fofo = { (void *)(long)&a };
+      static int a = 42;
+      struct s1 { void __attribute__((address_space(256))) *a; };
+      struct s1 fofo = { (void __attribute__((address_space(256))) *)(long)&a };

From noreply at buildbot.pypy.org Tue Mar 18 09:40:47 2014
From: noreply at buildbot.pypy.org (bdkearns)
Date: Tue, 18 Mar 2014 09:40:47 +0100 (CET)
Subject: [pypy-commit] pypy elidable-canfold-exception: cleaner? no need to unpack args ourself
Message-ID: <20140318084047.F41CF1C00B9@cobra.cs.uni-duesseldorf.de>

Author: Brian Kearns
Branch: elidable-canfold-exception
Changeset: r70048:ca1d94c202fc
Date: 2014-03-18 04:39 -0400
http://bitbucket.org/pypy/pypy/changeset/ca1d94c202fc/

Log: cleaner? no need to unpack args ourself

diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -1,5 +1,4 @@
 import sys
-import types
 import warnings
 import py
 
@@ -13,7 +12,7 @@
 
 DEBUG_ELIDABLE_FUNCTIONS = False
 
-def elidable(*args, **kwargs):
+def elidable(canfolderror=None):
     """
     Decorate a function as "trace-elidable". Usually this means simply that
     the function is constant-foldable, i.e. is pure and has no side-effects.
@@ -38,7 +37,6 @@
     In this case, if it raises specific exception listed there, it's also
     constant folded away.
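For illustration, a sketch of the intended usage of both forms (not part of the patch; 'first_byte' and 'char_to_digit' are made-up helpers):

    from rpython.rlib import jit

    @jit.elidable()
    def first_byte(s):
        # plain form: pure and constant-foldable as long as it does not raise
        return ord(s[0])

    @jit.elidable(canfolderror=ValueError)
    def char_to_digit(c):
        # with canfolderror, a call on a constant 'c' that raises ValueError
        # can be constant-folded as well, instead of being left in the trace
        if '0' <= c <= '9':
            return ord(c) - ord('0')
        raise ValueError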
""" - def decorator(func): if DEBUG_ELIDABLE_FUNCTIONS: cache = {} @@ -56,17 +54,17 @@ if canfolderror is not None: func._elidable_exceptions_ = canfolderror return func + return decorator +_elidable = elidable +def elidable(*args, **kwargs): if len(args) == 1: - # non-ported assert len(kwargs) == 0 warnings.warn("@elidable is deprecated, use @elidable() instead", stacklevel=2) - canfolderror = None - return decorator(args[0]) + return _elidable()(args[0]) else: assert len(args) == 0 - canfolderror = kwargs.get('canfolderror', None) - return decorator + return _elidable(**kwargs) def purefunction(*args, **kwargs): import warnings From noreply at buildbot.pypy.org Tue Mar 18 10:08:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 10:08:53 +0100 (CET) Subject: [pypy-commit] pypy elidable-canfold-exception: could also take this approach Message-ID: <20140318090853.725DB1D2540@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: elidable-canfold-exception Changeset: r70049:f73abcc35c33 Date: 2014-03-18 05:07 -0400 http://bitbucket.org/pypy/pypy/changeset/f73abcc35c33/ Log: could also take this approach diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1,4 +1,6 @@ +from __future__ import absolute_import import sys +import types import warnings import py @@ -58,16 +60,13 @@ _elidable = elidable def elidable(*args, **kwargs): - if len(args) == 1: + if len(args) == 1 and isinstance(args[0], types.FunctionType): assert len(kwargs) == 0 warnings.warn("@elidable is deprecated, use @elidable() instead", stacklevel=2) return _elidable()(args[0]) - else: - assert len(args) == 0 - return _elidable(**kwargs) + return _elidable(*args, **kwargs) def purefunction(*args, **kwargs): - import warnings warnings.warn("purefunction is deprecated, use elidable instead", DeprecationWarning) return elidable(*args, **kwargs) @@ -160,7 +159,6 @@ return decorator def purefunction_promote(*args, **kwargs): - import warnings warnings.warn("purefunction_promote is deprecated, use elidable_promote instead", DeprecationWarning) return elidable_promote(*args, **kwargs) diff --git a/rpython/rlib/test/test_jit.py b/rpython/rlib/test/test_jit.py --- a/rpython/rlib/test/test_jit.py +++ b/rpython/rlib/test/test_jit.py @@ -94,19 +94,25 @@ def test_elidable(): import warnings # deprecated version - if 1:#with warnings.catch_warnings(record=True) as w: - #assert not w + with warnings.catch_warnings(record=True) as w: + assert not w @elidable def f(): pass assert f._elidable_function_ - #assert "@elidable()" in w[0].message[0] + assert "@elidable()" in w[0].message[0] # proper version @elidable() def f(): pass assert f._elidable_function_ + + @elidable(ValueError) + def f(): + pass + assert f._elidable_function_ + @elidable(canfolderror=ValueError) def f(): pass From noreply at buildbot.pypy.org Tue Mar 18 10:31:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 10:31:29 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: A branch to add fork() support Message-ID: <20140318093129.1A8EE1D26D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1064:1df308a509dd Date: 2014-03-18 10:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/1df308a509dd/ Log: A branch to add fork() support From noreply at buildbot.pypy.org Tue Mar 18 10:31:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 10:31:30 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: in-progress, maybe, who knows 
Message-ID: <20140318093130.38ED71D26D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1065:f467a519f7b6 Date: 2014-03-18 10:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/f467a519f7b6/ Log: in-progress, maybe, who knows diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c new file mode 100644 --- /dev/null +++ b/c7/stm/forksupport.c @@ -0,0 +1,94 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +/* XXX this is currently not doing copy-on-write, but simply forces a + copy of all shared pages as soon as fork() is called. */ + + +static char *fork_big_copy; + + +static void forksupport_prepare(void) +{ + if (stm_object_pages == NULL) + return; + + /* This silently assumes that fork() is not called from transactions. + It's hard to check though... + */ + s_mutex_lock(); + + synchronize_all_threads(); + + mutex_pages_lock(); + + char *big_copy = setup_mmap("stmgc's fork support"); + + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (pagenum == endpagenum) + break; /* no pages in the 2nd section, so done too */ + } + + pagecopy(big_copy + pagenum * 4096UL, + stm_object_pages + pagenum * 4096UL); + pagenum++; + } + + assert(fork_big_copy == NULL); + fork_big_copy = big_copy; +} + +static void forksupport_parent(void) +{ + if (stm_object_pages == NULL) + return; + + assert(fork_big_copy != NULL); + munmap(fork_big_copy, TOTAL_MEMORY); + stm_object_pages = NULL; + + mutex_pages_unlock(); + s_mutex_unlock(); +} + +static void forksupport_child(void) +{ + if (stm_object_pages == NULL) + return; + + mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY, + MREMAP_MAYMOVE | MREMAP_FIXED, + stm_object_pages); + + ...; reset carefully a much bigger part of the state here :-((( + memset(pages_privatized, 0, sizeof(pages_privatized)); + + mutex_pages_unlock(); + s_mutex_unlock(); +} + + +static void setup_forksupport(void) +{ + static bool fork_support_ready = false; + + if (!fork_support_ready) { + int res = pthread_atfork(forksupport_prepare, forksupport_parent, + forksupport_child); + assert(res == 0); + fork_support_ready = true; + } +} diff --git a/c7/stm/forksupport.h b/c7/stm/forksupport.h new file mode 100644 --- /dev/null +++ b/c7/stm/forksupport.h @@ -0,0 +1,2 @@ + +static void setup_forksupport(void); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -39,6 +39,7 @@ static void page_privatize(uintptr_t pagenum); static void page_reshare(uintptr_t pagenum); +/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); static bool _has_mutex_pages(void) __attribute__((unused)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -3,6 +3,38 @@ #endif +static char *setup_mmap(char *reason) +{ + char *result = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (result == MAP_FAILED) + stm_fatalerror("%s failed: %m\n", reason); + + /* The segment 0 is not used to run transactions, but contains the + shared copy of the pages. 
We mprotect all pages before so that + accesses fail, up to and including the pages corresponding to the + nurseries of the other segments. */ + mprotect(result, END_NURSERY_PAGE * 4096UL, PROT_NONE); + + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *segment_base = result + i * (NB_PAGES * 4096UL); + + /* In each segment, the first page is where TLPREFIX'ed + NULL accesses land. We mprotect it so that accesses fail. */ + mprotect(segment_base, 4096, PROT_NONE); + + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) + mprotect(segment_base + 8192, + (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); + } + + return result; +} + void stm_setup(void) { /* Check that some values are acceptable */ @@ -20,38 +52,18 @@ (FIRST_READMARKER_PAGE * 4096UL)); assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - stm_object_pages = mmap(NULL, TOTAL_MEMORY, - PROT_READ | PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (stm_object_pages == MAP_FAILED) - stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); - - /* The segment 0 is not used to run transactions, but to contain the - shared copy of the pages. We mprotect all pages before so that - accesses fail, up to and including the pages corresponding to the - nurseries of the other segments. */ - mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); long i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); - /* In each segment, the first page is where TLPREFIX'ed - NULL accesses land. We mprotect it so that accesses fail. */ - mprotect(segment_base, 4096, PROT_NONE); - /* Fill the TLS page (page 1) with 0xDC, for debugging */ memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); - /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ - if (FIRST_READMARKER_PAGE > 2) - mprotect(segment_base + 8192, - (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); - /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ @@ -83,6 +95,7 @@ setup_nursery(); setup_gcpage(); setup_pages(); + setup_forksupport(); } void stm_teardown(void) diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -13,6 +13,7 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" +#include "stm/forksupport.h" #include "stm/misc.c" #include "stm/list.c" @@ -30,3 +31,4 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" +#include "stm/forksupport.c" From noreply at buildbot.pypy.org Tue Mar 18 11:52:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 11:52:20 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: Hopefully finish fork support Message-ID: <20140318105220.5FBD51D26D3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1066:c07cd6a30719 Date: 2014-03-18 11:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/c07cd6a30719/ Log: Hopefully finish fork support diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -7,7 +7,11 @@ copy of all shared pages as soon as fork() is called. 
*/ -static char *fork_big_copy; +static char *fork_big_copy = NULL; + +static char *setup_mmap(char *reason); /* forward, in setup.c */ +static void do_or_redo_setup_after_fork(void); /* forward, in setup.c */ +static void do_or_redo_teardown_after_fork(void); /* forward, in setup.c */ static void forksupport_prepare(void) @@ -58,7 +62,7 @@ assert(fork_big_copy != NULL); munmap(fork_big_copy, TOTAL_MEMORY); - stm_object_pages = NULL; + fork_big_copy = NULL; mutex_pages_unlock(); s_mutex_unlock(); @@ -69,15 +73,23 @@ if (stm_object_pages == NULL) return; + /* xxx the stm_thread_local_t belonging to other threads just leak. + Note that stm_all_thread_locals is preserved across a + stm_teardown/stm_setup sequence. */ + + mutex_pages_unlock(); + s_mutex_unlock(); + + do_or_redo_teardown_after_fork(); + + assert(fork_big_copy != NULL); + assert(stm_object_pages != NULL); mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY, MREMAP_MAYMOVE | MREMAP_FIXED, stm_object_pages); + fork_big_copy = NULL; - ...; reset carefully a much bigger part of the state here :-((( - memset(pages_privatized, 0, sizeof(pages_privatized)); - - mutex_pages_unlock(); - s_mutex_unlock(); + do_or_redo_setup_after_fork(); } @@ -88,7 +100,8 @@ if (!fork_support_ready) { int res = pthread_atfork(forksupport_prepare, forksupport_parent, forksupport_child); - assert(res == 0); + if (res != 0) + stm_fatalerror("pthread_atfork() failed: %m"); fork_support_ready = true; } } diff --git a/c7/stm/forksupport.h b/c7/stm/forksupport.h deleted file mode 100644 --- a/c7/stm/forksupport.h +++ /dev/null @@ -1,2 +0,0 @@ - -static void setup_forksupport(void); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -32,10 +32,6 @@ } } -static void teardown_nursery(void) -{ -} - static inline bool _is_in_nursery(object_t *obj) { assert((uintptr_t)obj >= NURSERY_START); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -25,6 +25,10 @@ static void teardown_pages(void) { memset(&pages_ctl, 0, sizeof(pages_ctl)); +} + +static void teardown_pages_1(void) +{ memset(pages_privatized, 0, sizeof(pages_privatized)); } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -35,25 +35,8 @@ return result; } -void stm_setup(void) +static void do_or_redo_setup_after_fork(void) { - /* Check that some values are acceptable */ - assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); - assert(4096 <= ((uintptr_t)STM_SEGMENT)); - assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); - assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); - assert(2 <= FIRST_READMARKER_PAGE); - assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); - assert(READMARKER_START < READMARKER_END); - assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); - assert(FIRST_OBJECT_PAGE < NB_PAGES); - assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); - assert((END_NURSERY_PAGE * 4096UL) >> 8 <= - (FIRST_READMARKER_PAGE * 4096UL)); - assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - - stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); - long i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); @@ -90,20 +73,38 @@ so a null read marker means "not read" whatever the current transaction_read_version is. 
*/ + setup_nursery(); +} + +void stm_setup(void) +{ + /* Check that some values are acceptable */ + assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); + assert(4096 <= ((uintptr_t)STM_SEGMENT)); + assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); + assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); + assert(2 <= FIRST_READMARKER_PAGE); + assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); + assert(READMARKER_START < READMARKER_END); + assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); + assert(FIRST_OBJECT_PAGE < NB_PAGES); + assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); + assert((END_NURSERY_PAGE * 4096UL) >> 8 <= + (FIRST_READMARKER_PAGE * 4096UL)); + assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); + + stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); + + do_or_redo_setup_after_fork(); setup_sync(); - setup_nursery(); setup_gcpage(); setup_pages(); setup_forksupport(); } -void stm_teardown(void) +static void do_or_redo_teardown_after_fork(void) { - /* This function is called during testing, but normal programs don't - need to call it. */ - assert(!_has_mutex()); - long i; for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); @@ -117,13 +118,24 @@ tree_free(pr->callbacks_on_abort); } + teardown_core(); + teardown_sync_1(); + teardown_pages_1(); +} + +void stm_teardown(void) +{ + /* This function is called during testing, but normal programs don't + need to call it. */ + assert(!_has_mutex()); + + do_or_redo_teardown_after_fork(); + munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; - teardown_core(); teardown_sync(); teardown_gcpage(); - teardown_nursery(); teardown_pages(); } @@ -174,6 +186,7 @@ void stm_unregister_thread_local(stm_thread_local_t *tl) { s_mutex_lock(); + assert(tl->prev != NULL); assert(tl->next != NULL); _done_shadow_stack(tl); if (tl == stm_all_thread_locals) { diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -59,7 +59,10 @@ if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) stm_fatalerror("cond destroy: %m\n"); } +} +static void teardown_sync_1(void) +{ memset(&sync_ctl, 0, sizeof(sync_ctl)); } diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -13,7 +13,6 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" -#include "stm/forksupport.h" #include "stm/misc.c" #include "stm/list.c" @@ -24,6 +23,7 @@ #include "stm/largemalloc.c" #include "stm/nursery.c" #include "stm/sync.c" +#include "stm/forksupport.c" #include "stm/setup.c" #include "stm/hash_id.c" #include "stm/core.c" @@ -31,4 +31,3 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" -#include "stm/forksupport.c" From noreply at buildbot.pypy.org Tue Mar 18 12:46:42 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 18 Mar 2014 12:46:42 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: make better names for the generated elidable funcs Message-ID: <20140318114642.1A61F1C0166@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r657:a9926a6cd014 Date: 2014-03-18 12:46 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a9926a6cd014/ Log: make better names for the generated elidable funcs diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -5,6 +5,8 @@ from rpython.rlib import rarithmetic, jit from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import 
make_sure_not_resized + elidable_func.func_name = "elidable_" + func.func_name + meth.func_name = "elidable_meth_" + func.func_name class AbstractShadow(object): """A shadow is an optional extra bit of information that From noreply at buildbot.pypy.org Tue Mar 18 13:19:11 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 18 Mar 2014 13:19:11 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: gah, fix Message-ID: <20140318121911.49FCD1D2808@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r659:c3530864395f Date: 2014-03-18 13:19 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/c3530864395f/ Log: gah, fix diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -5,8 +5,6 @@ from rpython.rlib import rarithmetic, jit from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import make_sure_not_resized - elidable_func.func_name = "elidable_" + func.func_name - meth.func_name = "elidable_meth_" + func.func_name class AbstractShadow(object): """A shadow is an optional extra bit of information that diff --git a/spyvm/version.py b/spyvm/version.py --- a/spyvm/version.py +++ b/spyvm/version.py @@ -8,6 +8,8 @@ return func(self, *args) def meth(self, *args): return elidable_func(self, self.version, *args) + elidable_func.func_name = "elidable_" + func.func_name + meth.func_name = "elidable_meth_" + func.func_name return meth # In addition to marking the decorated function as "pure", both the receiver From noreply at buildbot.pypy.org Tue Mar 18 14:04:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 14:04:13 +0100 (CET) Subject: [pypy-commit] stmgc default: Don't put "\n" at the end of stm_fatalerror() messages (as it was Message-ID: <20140318130413.424BF1D2971@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1067:8aa49554dd50 Date: 2014-03-18 14:03 +0100 http://bitbucket.org/pypy/stmgc/changeset/8aa49554dd50/ Log: Don't put "\n" at the end of stm_fatalerror() messages (as it was already forgotten occasionally) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -157,7 +157,7 @@ MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { /* fall-back */ #if STM_TESTS - stm_fatalerror("reset_transaction_read_version: %m\n"); + stm_fatalerror("reset_transaction_read_version: %m"); #endif memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); } diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ b/c7/stm/fprintcolor.c @@ -41,6 +41,7 @@ va_start(ap, format); vfprintf(stderr, format, ap); + fprintf(stderr, "\n"); va_end(ap); abort(); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -60,7 +60,7 @@ return; out_of_memory: - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } static char *_allocate_small_slowpath(uint64_t size) @@ -85,7 +85,7 @@ /* Allocate the object with largemalloc.c from the lower addresses. 
*/ char *addr = _stm_large_malloc(size); if (addr == NULL) - stm_fatalerror("not enough memory!\n"); + stm_fatalerror("not enough memory!"); if (addr + size > uninitialized_page_start) { uintptr_t npages; @@ -93,7 +93,7 @@ npages += GCPAGE_NUM_PAGES; if (uninitialized_page_stop - uninitialized_page_start < npages * 4096UL) { - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } setup_N_pages(uninitialized_page_start, npages); uninitialized_page_start += npages * 4096UL; diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -12,7 +12,7 @@ uintptr_t initial_allocation = 32; struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); if (lst == NULL) - stm_fatalerror("out of memory in list_create\n"); /* XXX */ + stm_fatalerror("out of memory in list_create"); /* XXX */ lst->count = 0; lst->last_allocated = initial_allocation - 1; @@ -24,7 +24,7 @@ nalloc = LIST_OVERCNT(nalloc); lst = realloc(lst, LIST_SETSIZE(nalloc)); if (lst == NULL) - stm_fatalerror("out of memory in _list_grow\n"); /* XXX */ + stm_fatalerror("out of memory in _list_grow"); /* XXX */ lst->last_allocated = nalloc - 1; return lst; @@ -93,7 +93,7 @@ //fprintf(stderr, "growth: %ld\n", newalloc); char *newitems = malloc(newalloc); if (newitems == NULL) { - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } newtree.raw_start = newitems; newtree.raw_current = newitems; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -93,7 +93,7 @@ int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) - stm_fatalerror("remap_file_pages: %m\n"); + stm_fatalerror("remap_file_pages: %m"); } static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -24,7 +24,7 @@ PROT_READ | PROT_WRITE, MAP_PAGES_FLAGS, -1, 0); if (stm_object_pages == MAP_FAILED) - stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); + stm_fatalerror("initial stm_object_pages mmap() failed: %m"); /* The segment 0 is not used to run transactions, but to contain the shared copy of the pages. 
We mprotect all pages before so that diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -40,24 +40,24 @@ static void setup_sync(void) { if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0) - stm_fatalerror("mutex initialization: %m\n"); + stm_fatalerror("mutex initialization: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0) - stm_fatalerror("cond initialization: %m\n"); + stm_fatalerror("cond initialization: %m"); } } static void teardown_sync(void) { if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0) - stm_fatalerror("mutex destroy: %m\n"); + stm_fatalerror("mutex destroy: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) - stm_fatalerror("cond destroy: %m\n"); + stm_fatalerror("cond destroy: %m"); } memset(&sync_ctl, 0, sizeof(sync_ctl)); @@ -74,14 +74,14 @@ static void set_gs_register(char *value) { if (UNLIKELY(syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0)) - stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n"); + stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m"); } static inline void s_mutex_lock(void) { assert(!_has_mutex_here); if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_mutex_lock: %m\n"); + stm_fatalerror("pthread_mutex_lock: %m"); assert((_has_mutex_here = true, 1)); } @@ -89,32 +89,32 @@ { assert(_has_mutex_here); if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_mutex_unlock: %m\n"); + stm_fatalerror("pthread_mutex_unlock: %m"); assert((_has_mutex_here = false, 1)); } static inline void cond_wait(enum cond_type_e ctype) { #ifdef STM_NO_COND_WAIT - stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); + stm_fatalerror("*** cond_wait/%d called!", (int)ctype); #endif assert(_has_mutex_here); if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], &sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_wait/%d: %m", (int)ctype); } static inline void cond_signal(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_signal/%d: %m", (int)ctype); } static inline void cond_broadcast(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_broadcast/%d: %m", (int)ctype); } /************************************************************/ From noreply at buildbot.pypy.org Tue Mar 18 14:04:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 14:04:14 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: hg merge default Message-ID: <20140318130414.689901D2972@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1068:91b34e65d842 Date: 2014-03-18 14:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/91b34e65d842/ Log: hg merge default diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -157,7 +157,7 @@ MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { /* fall-back */ #if STM_TESTS - stm_fatalerror("reset_transaction_read_version: %m\n"); + stm_fatalerror("reset_transaction_read_version: %m"); #endif memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); } diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ 
b/c7/stm/fprintcolor.c @@ -41,6 +41,7 @@ va_start(ap, format); vfprintf(stderr, format, ap); + fprintf(stderr, "\n"); va_end(ap); abort(); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -60,7 +60,7 @@ return; out_of_memory: - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } static char *_allocate_small_slowpath(uint64_t size) @@ -85,7 +85,7 @@ /* Allocate the object with largemalloc.c from the lower addresses. */ char *addr = _stm_large_malloc(size); if (addr == NULL) - stm_fatalerror("not enough memory!\n"); + stm_fatalerror("not enough memory!"); if (addr + size > uninitialized_page_start) { uintptr_t npages; @@ -93,7 +93,7 @@ npages += GCPAGE_NUM_PAGES; if (uninitialized_page_stop - uninitialized_page_start < npages * 4096UL) { - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } setup_N_pages(uninitialized_page_start, npages); uninitialized_page_start += npages * 4096UL; diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -12,7 +12,7 @@ uintptr_t initial_allocation = 32; struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); if (lst == NULL) - stm_fatalerror("out of memory in list_create\n"); /* XXX */ + stm_fatalerror("out of memory in list_create"); /* XXX */ lst->count = 0; lst->last_allocated = initial_allocation - 1; @@ -24,7 +24,7 @@ nalloc = LIST_OVERCNT(nalloc); lst = realloc(lst, LIST_SETSIZE(nalloc)); if (lst == NULL) - stm_fatalerror("out of memory in _list_grow\n"); /* XXX */ + stm_fatalerror("out of memory in _list_grow"); /* XXX */ lst->last_allocated = nalloc - 1; return lst; @@ -93,7 +93,7 @@ //fprintf(stderr, "growth: %ld\n", newalloc); char *newitems = malloc(newalloc); if (newitems == NULL) { - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } newtree.raw_start = newitems; newtree.raw_current = newitems; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -97,7 +97,7 @@ int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) - stm_fatalerror("remap_file_pages: %m\n"); + stm_fatalerror("remap_file_pages: %m"); } static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -40,24 +40,24 @@ static void setup_sync(void) { if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0) - stm_fatalerror("mutex initialization: %m\n"); + stm_fatalerror("mutex initialization: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0) - stm_fatalerror("cond initialization: %m\n"); + stm_fatalerror("cond initialization: %m"); } } static void teardown_sync(void) { if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0) - stm_fatalerror("mutex destroy: %m\n"); + stm_fatalerror("mutex destroy: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) - stm_fatalerror("cond destroy: %m\n"); + stm_fatalerror("cond destroy: %m"); } } @@ -77,14 +77,14 @@ static void set_gs_register(char *value) { if (UNLIKELY(syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0)) - stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n"); + stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m"); } static inline void s_mutex_lock(void) { assert(!_has_mutex_here); if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) - 
stm_fatalerror("pthread_mutex_lock: %m\n"); + stm_fatalerror("pthread_mutex_lock: %m"); assert((_has_mutex_here = true, 1)); } @@ -92,32 +92,32 @@ { assert(_has_mutex_here); if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_mutex_unlock: %m\n"); + stm_fatalerror("pthread_mutex_unlock: %m"); assert((_has_mutex_here = false, 1)); } static inline void cond_wait(enum cond_type_e ctype) { #ifdef STM_NO_COND_WAIT - stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); + stm_fatalerror("*** cond_wait/%d called!", (int)ctype); #endif assert(_has_mutex_here); if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], &sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_wait/%d: %m", (int)ctype); } static inline void cond_signal(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_signal/%d: %m", (int)ctype); } static inline void cond_broadcast(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_broadcast/%d: %m", (int)ctype); } /************************************************************/ From noreply at buildbot.pypy.org Tue Mar 18 14:06:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 14:06:18 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: progress Message-ID: <20140318130618.41FFC1D297A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1069:a1db46a027b5 Date: 2014-03-18 14:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/a1db46a027b5/ Log: progress diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -8,10 +8,12 @@ static char *fork_big_copy = NULL; +static stm_thread_local_t *fork_this_tl; static char *setup_mmap(char *reason); /* forward, in setup.c */ static void do_or_redo_setup_after_fork(void); /* forward, in setup.c */ static void do_or_redo_teardown_after_fork(void); /* forward, in setup.c */ +static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ static void forksupport_prepare(void) @@ -19,15 +21,35 @@ if (stm_object_pages == NULL) return; - /* This silently assumes that fork() is not called from transactions. - It's hard to check though... - */ + /* This assumes that fork() is not called from transactions. + So far we attempt to check this by walking all stm_thread_local_t, + marking the one from the current thread, and verifying that it's not + running a transaction. This assumes that the stm_thread_local_t is just + a __thread variable, so never changes threads. 
+ */ s_mutex_lock(); synchronize_all_threads(); mutex_pages_lock(); + fork_this_tl = NULL; + stm_thread_local_t *tl = stm_all_thread_locals; + do { + if (pthread_equal(*_get_cpth(tl), pthread_self())) { + if (_stm_in_transaction(tl)) + stm_fatalerror("fork(): cannot be used inside a transaction"); + if (fork_this_tl != NULL) + stm_fatalerror("fork(): found several stm_thread_local_t" + " from the same thread"); + fork_this_tl = tl; + } + tl = tl->next; + } while (tl != stm_all_thread_locals); + + if (fork_this_tl == NULL) + stm_fatalerror("fork(): found no stm_thread_local_t from this thread"); + char *big_copy = setup_mmap("stmgc's fork support"); uintptr_t pagenum, endpagenum; @@ -80,6 +102,15 @@ mutex_pages_unlock(); s_mutex_unlock(); + stm_thread_local_t *tl = stm_all_thread_locals; + do { + stm_thread_local_t *nexttl = tl->next; + if (tl != fork_this_tl) { + stm_unregister_thread_local(tl); + } + tl = nexttl; + } while (tl != stm_all_thread_locals); + do_or_redo_teardown_after_fork(); assert(fork_big_copy != NULL); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -139,7 +139,7 @@ teardown_pages(); } -void _init_shadow_stack(stm_thread_local_t *tl) +static void _init_shadow_stack(stm_thread_local_t *tl) { struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); @@ -148,13 +148,18 @@ tl->shadowstack_base = s; } -void _done_shadow_stack(stm_thread_local_t *tl) +static void _done_shadow_stack(stm_thread_local_t *tl) { free(tl->shadowstack_base); tl->shadowstack = NULL; tl->shadowstack_base = NULL; } +static pthread_t *_get_cpth(stm_thread_local_t *tl) +{ + assert(sizeof(pthread_t) <= sizeof(tl->creating_pthread)); + return (pthread_t *)(tl->creating_pthread); +} void stm_register_thread_local(stm_thread_local_t *tl) { @@ -178,6 +183,7 @@ numbers automatically. */ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); s_mutex_unlock(); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -69,6 +69,7 @@ /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; + long creating_pthread[4]; } stm_thread_local_t; /* this should use llvm's coldcc calling convention, From noreply at buildbot.pypy.org Tue Mar 18 15:31:41 2014 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 18 Mar 2014 15:31:41 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies: change jit.py so we can just run code Message-ID: <20140318143141.B523A1C0A66@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: strategies Changeset: r660:ae6d13449b51 Date: 2014-03-18 13:10 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/ae6d13449b51/ Log: change jit.py so we can just run code diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -3,6 +3,10 @@ # # view jit. 
# +Code = """ +(1 to: 2000) asOrderedCollection +""" + import sys, os from rpython import conftest @@ -52,15 +56,34 @@ class TestLLtype(LLJitMixin): def test_miniloop(self): - + import time from spyvm import objspace space = objspace.ObjSpace() image = create_testimage(space) interp = interpreter.Interpreter(space, image) - w_selector = interp.perform(space.wrap_string('loopTest2'), "asSymbol") + + selector = "codeTest%d" % int(time.time()) + try: + w_result = interp.perform( + interp.space.w_SmallInteger, + "compile:classified:notifying:", + space.wrap_string("%s\r\n%s" % (selector, Code)), + space.wrap_string("spy-run-code"), + space.w_nil + ) + except interpreter.ReturnFromTopLevel, e: + print e.object + return 1 + except error.Exit, e: + print e.msg + return 1 + + w_selector = interp.perform(space.wrap_string(selector), "asSymbol") assert isinstance(w_selector, model.W_BytesObject) def interp_w(): - interp.perform(model.W_SmallInteger(1000), w_selector) + interp.perform(space.wrap_int(0), w_selector) + self.meta_interp(interp_w, [], listcomp=True, listops=True, backendopt=True, inline=True) - self.meta_interp(interp_w, [], listcomp=True, listops=True, backendopt=True, inline=True) +if __name__ == "__main__": + TestLLtype().test_miniloop() From noreply at buildbot.pypy.org Tue Mar 18 15:31:42 2014 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 18 Mar 2014 15:31:42 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies: merge default Message-ID: <20140318143142.DB6411C0A66@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: strategies Changeset: r661:20795bd19094 Date: 2014-03-18 13:11 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/20795bd19094/ Log: merge default diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -5,6 +5,8 @@ from rpython.rlib import rarithmetic, jit from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import make_sure_not_resized + elidable_func.func_name = "elidable_" + func.func_name + meth.func_name = "elidable_meth_" + func.func_name class AbstractShadow(object): """A shadow is an optional extra bit of information that From noreply at buildbot.pypy.org Tue Mar 18 15:31:44 2014 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 18 Mar 2014 15:31:44 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies: merge tip Message-ID: <20140318143144.113121C0A66@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: strategies Changeset: r662:28eef032af8c Date: 2014-03-18 15:28 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/28eef032af8c/ Log: merge tip diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -5,8 +5,6 @@ from rpython.rlib import rarithmetic, jit from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import make_sure_not_resized - elidable_func.func_name = "elidable_" + func.func_name - meth.func_name = "elidable_meth_" + func.func_name class AbstractShadow(object): """A shadow is an optional extra bit of information that diff --git a/spyvm/version.py b/spyvm/version.py --- a/spyvm/version.py +++ b/spyvm/version.py @@ -8,6 +8,8 @@ return func(self, *args) def meth(self, *args): return elidable_func(self, self.version, *args) + elidable_func.func_name = "elidable_" + func.func_name + meth.func_name = "elidable_meth_" + func.func_name return meth # In addition to marking the decorated function as "pure", both the receiver From noreply at buildbot.pypy.org Tue Mar 18 15:31:45 
2014 From: noreply at buildbot.pypy.org (timfel) Date: Tue, 18 Mar 2014 15:31:45 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies: merge upstream Message-ID: <20140318143145.2D6EA1C0A66@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: strategies Changeset: r663:072f88561376 Date: 2014-03-18 15:29 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/072f88561376/ Log: merge upstream diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -485,11 +485,15 @@ class W_AbstractPointersObject(W_AbstractObjectWithClassReference): """Common object.""" - _attrs_ = ['shadow', 'version'] - import_from_mixin(version.VersionMixin) + _attrs_ = ['shadow'] shadow = None # Default value + def changed(self): + # This is called whenever an instance variable is changed on the receiver. + # Was used with a version variable before. Left here in case it might be usefull in the future. + pass + @jit.unroll_safe def __init__(self, space, w_class, size): """Create new object with size = fixed + variable size.""" @@ -537,7 +541,6 @@ assert self.shadow is None or self.shadow is shadow self.shadow = shadow - @elidable_for_version def _get_shadow(self): return self.shadow @@ -680,11 +683,9 @@ def set_storage(self, storage): self._storage = storage - @elidable_for_version def get_storage(self): return self._storage - @elidable_for_version def get_strategy(self): return self.strategy From noreply at buildbot.pypy.org Tue Mar 18 15:43:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 15:43:20 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: in-progress Message-ID: <20140318144320.5B4421C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1070:bef9be522fb9 Date: 2014-03-18 15:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/bef9be522fb9/ Log: in-progress diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -4,20 +4,24 @@ #include #include #include +#include +#include #include "stmgc.h" #define NUMTHREADS 3 -#define STEPS_PER_THREAD 500 -#define THREAD_STARTS 1000 // how many restarts of threads +#define STEPS_PER_THREAD 300 +#define THREAD_STARTS 300 // how many restarts of threads #define PREBUILT_ROOTS 3 #define MAXROOTS 1000 +#define FORKS 3 // SUPPORT struct node_s; typedef TLPREFIX struct node_s node_t; typedef node_t* nodeptr_t; typedef object_t* objptr_t; +int num_forked_children = 0; struct node_s { struct object_s hdr; @@ -337,6 +341,22 @@ push_roots(); stm_commit_transaction(); + if (arg) { + printf("========== FORK =========\n"); + arg = NULL; + pid_t child = fork(); + printf("=== in process %d thread %lx, fork() returned %d\n", + (int)getpid(), (long)pthread_self(), (int)child); + if (child == -1) { + fprintf(stderr, "fork() error: %m\n"); + abort(); + } + if (child != 0) + num_forked_children++; + else + num_forked_children = 0; + } + td.num_roots_at_transaction_start = td.num_roots; if (get_rand(100) < 98) { @@ -427,8 +447,24 @@ assert(status == 0); printf("thread finished\n"); if (thread_starts) { + long forkbase = NUMTHREADS * THREAD_STARTS / (FORKS + 1); + long _fork = (thread_starts % forkbase) == 0; thread_starts--; - newthread(demo_random, NULL); + newthread(demo_random, (void *)_fork); + } + } + + for (i = 0; i < num_forked_children; i++) { + pid_t child = wait(&status); + if (child == -1) + perror("wait"); + printf("From %d: child %d terminated with exit status %d\n", + (int)getpid(), (int)child, status); + if 
(WIFEXITED(status) && WEXITSTATUS(status) == 0) + ; + else { + printf("*** error from the child ***\n"); + return 1; } } diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -28,10 +28,9 @@ a __thread variable, so never changes threads. */ s_mutex_lock(); + mutex_pages_lock(); - synchronize_all_threads(); - - mutex_pages_lock(); + dprintf(("forksupport_prepare: synchronized all threads\n")); fork_this_tl = NULL; stm_thread_local_t *tl = stm_all_thread_locals; @@ -50,11 +49,29 @@ if (fork_this_tl == NULL) stm_fatalerror("fork(): found no stm_thread_local_t from this thread"); + /* Make a new mmap at some other address, but of the same size as + the standard mmap at stm_object_pages + */ char *big_copy = setup_mmap("stmgc's fork support"); + /* Copy each of the segment infos into the new mmap + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *src = get_priv_segment(i); + char *dst = big_copy + (((char *)src) - stm_object_pages); + *(struct stm_priv_segment_info_s *)dst = *src; + } + + /* Copy all the data from the two ranges of objects (large, small) + into the new mmap --- but only the shared objects + */ uintptr_t pagenum, endpagenum; pagenum = END_NURSERY_PAGE; /* starts after the nursery */ endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + if (endpagenum < NB_PAGES) + endpagenum++; /* the next page too, because it might contain + data from largemalloc */ while (1) { if (UNLIKELY(pagenum == endpagenum)) { @@ -63,9 +80,9 @@ if (endpagenum == NB_PAGES) break; /* done */ pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + pagenum--; /* the prev page too, because it does contain + data from largemalloc */ endpagenum = NB_PAGES; - if (pagenum == endpagenum) - break; /* no pages in the 2nd section, so done too */ } pagecopy(big_copy + pagenum * 4096UL, @@ -82,10 +99,15 @@ if (stm_object_pages == NULL) return; + /* In the parent, after fork(), we can simply forget about the big copy + that we made for the child. + */ assert(fork_big_copy != NULL); munmap(fork_big_copy, TOTAL_MEMORY); fork_big_copy = NULL; + dprintf(("forksupport_parent: continuing to run\n")); + mutex_pages_unlock(); s_mutex_unlock(); } @@ -95,32 +117,62 @@ if (stm_object_pages == NULL) return; - /* xxx the stm_thread_local_t belonging to other threads just leak. - Note that stm_all_thread_locals is preserved across a - stm_teardown/stm_setup sequence. 
*/ - + /* In the child, first unregister all other stm_thread_local_t, + mostly as a way to free the memory used by the shadowstacks + */ mutex_pages_unlock(); s_mutex_unlock(); - stm_thread_local_t *tl = stm_all_thread_locals; - do { - stm_thread_local_t *nexttl = tl->next; - if (tl != fork_this_tl) { - stm_unregister_thread_local(tl); - } - tl = nexttl; - } while (tl != stm_all_thread_locals); + assert(fork_this_tl != NULL); + while (stm_all_thread_locals->next != stm_all_thread_locals) { + if (stm_all_thread_locals == fork_this_tl) + stm_unregister_thread_local(stm_all_thread_locals->next); + else + stm_unregister_thread_local(stm_all_thread_locals); + } + assert(stm_all_thread_locals == fork_this_tl); - do_or_redo_teardown_after_fork(); + /* Restore a few things in the child: the new pthread_self(), and + the %gs register (although I suppose it should be preserved by + fork()) + */ + *_get_cpth(fork_this_tl) = pthread_self(); + set_gs_register(get_segment_base(fork_this_tl->associated_segment_num)); + fork_this_tl = NULL; + /* Move the copy of the mmap over the old one, overwriting it + and thus freeing the old mapping in this process + */ assert(fork_big_copy != NULL); assert(stm_object_pages != NULL); - mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY, - MREMAP_MAYMOVE | MREMAP_FIXED, - stm_object_pages); + void *res = mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY, + MREMAP_MAYMOVE | MREMAP_FIXED, + stm_object_pages); + if (res != stm_object_pages) + stm_fatalerror("after fork: mremap failed: %m"); fork_big_copy = NULL; + /* Call a subset of stm_teardown() / stm_setup() to free and + recreate the necessary data in all segments, and to clean up some + of the global data like the big arrays that don't make sense any + more. We keep other things like the smallmalloc and largemalloc + internal state. + */ + do_or_redo_teardown_after_fork(); do_or_redo_setup_after_fork(); + + /* Make all pages shared again. + */ + mutex_pages_lock(); + uintptr_t start = END_NURSERY_PAGE; + uintptr_t stop = (uninitialized_page_start - stm_object_pages) / 4096UL; + pages_initialize_shared(start, stop - start); + start = (uninitialized_page_stop - stm_object_pages) / 4096UL; + stop = NB_PAGES; + pages_initialize_shared(start, stop - start); + mutex_pages_unlock(); + + dprintf(("forksupport_child: running one thread now\n")); } diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ b/c7/stm/fprintcolor.c @@ -8,8 +8,8 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm[%lx] ", dprintfcolor(), - (long)pthread_self()); + int size = (int)sprintf(buffer, "\033[%dm[%d,%lx] ", dprintfcolor(), + (int)getpid(), (long)pthread_self()); assert(size >= 0); va_start(ap, format); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -273,8 +273,10 @@ /* unlink the following chunk */ mscan->d.next->prev = mscan->d.prev; mscan->d.prev->next = mscan->d.next; - assert((mscan->prev_size = (size_t)-258, 1)); /* 0xfffffffffffffefe */ - assert((mscan->size = (size_t)-515, 1)); /* 0xfffffffffffffdfd */ +#ifndef NDEBUG + mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ + mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ +#endif /* merge the two chunks */ assert(fsize == fscan->prev_size); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -107,6 +107,8 @@ segment 0. 
*/ uintptr_t i; assert(_has_mutex_pages()); + if (count == 0) + return; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -11,41 +11,36 @@ if (result == MAP_FAILED) stm_fatalerror("%s failed: %m\n", reason); + return result; +} + +static void do_or_redo_setup_after_fork(void) +{ /* The segment 0 is not used to run transactions, but contains the shared copy of the pages. We mprotect all pages before so that accesses fail, up to and including the pages corresponding to the nurseries of the other segments. */ - mprotect(result, END_NURSERY_PAGE * 4096UL, PROT_NONE); + mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); long i; for (i = 1; i <= NB_SEGMENTS; i++) { - char *segment_base = result + i * (NB_PAGES * 4096UL); + char *segment_base = get_segment_base(i); /* In each segment, the first page is where TLPREFIX'ed NULL accesses land. We mprotect it so that accesses fail. */ mprotect(segment_base, 4096, PROT_NONE); + /* Fill the TLS page (page 1) with 0xDC, for debugging */ + memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); + /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ + memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, + sizeof(*STM_PSEGMENT)); + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ if (FIRST_READMARKER_PAGE > 2) mprotect(segment_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); - } - - return result; -} - -static void do_or_redo_setup_after_fork(void) -{ - long i; - for (i = 1; i <= NB_SEGMENTS; i++) { - char *segment_base = get_segment_base(i); - - /* Fill the TLS page (page 1) with 0xDC, for debugging */ - memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); - /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ - memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, - sizeof(*STM_PSEGMENT)); /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); @@ -108,8 +103,8 @@ long i; for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(pr->objects_pointing_to_nursery == NULL); - assert(pr->large_overflow_objects == NULL); + LIST_FREE(pr->objects_pointing_to_nursery); + LIST_FREE(pr->large_overflow_objects); list_free(pr->modified_old_objects); list_free(pr->young_weakrefs); list_free(pr->old_weakrefs); From noreply at buildbot.pypy.org Tue Mar 18 15:46:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Mar 2014 15:46:40 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: Reset this state after fork too Message-ID: <20140318144640.1C9D91C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1071:2c4acbf96895 Date: 2014-03-18 15:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/2c4acbf96895/ Log: Reset this state after fork too diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -36,6 +36,10 @@ char reserved[192]; } sync_ctl __attribute__((aligned(64))); +#ifndef NDEBUG +static bool _safe_points_requested = false; +#endif + static void setup_sync(void) { @@ -64,6 +68,10 @@ static void teardown_sync_1(void) { memset(&sync_ctl, 0, sizeof(sync_ctl)); +#ifndef NDEBUG + _safe_points_requested = false; +#endif + pause_signalled = false; } #ifndef NDEBUG @@ -255,10 +263,6 @@ /************************************************************/ -#ifndef NDEBUG -static bool _safe_points_requested = 
false; -#endif - static void signal_everybody_to_pause_running(void) { assert(_safe_points_requested == false); From noreply at buildbot.pypy.org Tue Mar 18 16:48:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 18 Mar 2014 16:48:22 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: Close to-be-merged branch. Message-ID: <20140318154822.9BB6A1D26D3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r70052:0ae760fa2735 Date: 2014-03-18 16:21 +0100 http://bitbucket.org/pypy/pypy/changeset/0ae760fa2735/ Log: Close to-be-merged branch. From noreply at buildbot.pypy.org Tue Mar 18 16:48:24 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 18 Mar 2014 16:48:24 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge refactor-buffer-api Message-ID: <20140318154824.A82851D26D3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r70053:5cff90211004 Date: 2014-03-18 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/5cff90211004/ Log: hg merge refactor-buffer-api Separate the interp-level buffer API from the buffer type exposed to app-level. The `Buffer` class is now used by `W_MemoryView` and `W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was an alias to `Buffer`, which was wrappable itself. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -194,6 +194,15 @@ def immutable_unique_id(self, space): return None + def buffer_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + from pypy.module.__builtin__.interp_memoryview import W_Buffer + if isinstance(w_result, W_Buffer): + return w_result.buf + self._typed_unwrap_error(space, "buffer") + def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -1314,10 +1323,7 @@ 'to unsigned int')) def buffer_w(self, w_obj): - # returns a Buffer instance - from pypy.interpreter.buffer import Buffer - w_buffer = self.buffer(w_obj) - return self.interp_w(Buffer, w_buffer) + return w_obj.buffer_w(self) def rwbuffer_w(self, w_obj): # returns a RWBuffer instance @@ -1677,7 +1683,6 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,30 +1,12 @@ """ Buffer protocol support. """ +from pypy.interpreter.error import OperationError +from rpython.rlib.objectmodel import import_from_mixin -# The implementation of the buffer protocol. The basic idea is that we -# can ask any app-level object for a 'buffer' view on it, by calling its -# __buffer__() special method. It should return a wrapped instance of a -# subclass of the Buffer class defined below. Note that __buffer__() is -# a PyPy-only extension to the Python language, made necessary by the -# fact that it's not natural in PyPy to hack an interp-level-only -# interface. -# In normal usage, the convenience method space.buffer_w() should be -# used to get directly a Buffer instance. Doing so also gives you for -# free the typecheck that __buffer__() really returned a wrapped Buffer. 
- -import operator -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash, import_from_mixin -from rpython.rlib.rstring import StringBuilder - - -class Buffer(W_Root): - """Abstract base class for memory views.""" +class Buffer(object): + """Abstract base class for buffers.""" __slots__ = () # no extra slot here @@ -47,94 +29,12 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): return False - # __________ app-level support __________ - - def descr_len(self, space): - return space.wrap(self.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - return space.wrap(self.getitem(start)) - res = self.getslice(start, stop, step, size) - return space.wrap(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self, RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - if len(newstring) != 1: - msg = 'buffer[index]=x: x must be a single character' - raise OperationError(space.w_TypeError, space.wrap(msg)) - char = newstring[0] # annotator hint - self.setitem(start, char) - elif step == 1: - if len(newstring) != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_ValueError, space.wrap(msg)) - self.setslice(start, newstring) - else: - raise OperationError(space.w_ValueError, - space.wrap("buffer object does not support" - " slicing with a step")) - - def descr__buffer__(self, space): - return space.wrap(self) - - def descr_str(self, space): - return space.wrap(self.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrap(self.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrap(self.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self, RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.getlength())) - - class RWBuffer(Buffer): - """Abstract base class for read-write memory views.""" + """Abstract base class for read-write buffers.""" __slots__ = () # no extra slot here @@ -151,72 +51,6 @@ self.setitem(start + i, string[i]) - at 
unwrap_spec(offset=int, size=int) -def descr_buffer__new__(space, w_subtype, w_object, offset=0, size=-1): - # w_subtype can only be exactly 'buffer' for now - if not space.is_w(w_subtype, space.gettypefor(Buffer)): - raise OperationError(space.w_TypeError, - space.wrap("argument 1 must be 'buffer'")) - - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - w_buffer = space.wrap(StringBuffer(builder.build())) - else: - w_buffer = space.buffer(w_object) - - buffer = space.interp_w(Buffer, w_buffer) # type-check - if offset == 0 and size == -1: - return w_buffer - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buffer, RWBuffer): - buffer = RWSubBuffer(buffer, offset, size) - else: - buffer = SubBuffer(buffer, offset, size) - return space.wrap(buffer) - - -Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). -""", - __new__ = interp2app(descr_buffer__new__), - __len__ = interp2app(Buffer.descr_len), - __getitem__ = interp2app(Buffer.descr_getitem), - __setitem__ = interp2app(Buffer.descr_setitem), - __buffer__ = interp2app(Buffer.descr__buffer__), - __str__ = interp2app(Buffer.descr_str), - __add__ = interp2app(Buffer.descr_add), - __eq__ = interp2app(Buffer.descr_eq), - __ne__ = interp2app(Buffer.descr_ne), - __lt__ = interp2app(Buffer.descr_lt), - __le__ = interp2app(Buffer.descr_le), - __gt__ = interp2app(Buffer.descr_gt), - __ge__ = interp2app(Buffer.descr_ge), - __hash__ = interp2app(Buffer.descr_hash), - __mul__ = interp2app(Buffer.descr_mul), - __rmul__ = interp2app(Buffer.descr_mul), - __repr__ = interp2app(Buffer.descr_repr), -) -Buffer.typedef.acceptable_as_base_class = False # ____________________________________________________________ diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,5 +1,5 @@ import py -from pypy.interpreter.buffer import Buffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) @@ -11,19 +11,17 @@ space = self.space w_hello = space.wrap('hello world') buf = space.buffer_w(w_hello) - assert isinstance(buf, Buffer) assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.wrap(buf)) is buf + assert space.buffer_w(W_Buffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.buffer(w_hello)) == 'hello world' + assert space.bufferstr_w(W_Buffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - 
space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) def test_file_write(self): space = self.space - w_buffer = space.buffer(space.wrap('hello world')) + w_buffer = W_Buffer(space.buffer_w(space.wrap('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -1,14 +1,168 @@ """ Implementation of the 'buffer' and 'memoryview' types. """ +import operator + +from pypy.interpreter import buffer from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import buffer +from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError -import operator +from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.rstring import StringBuilder -W_Buffer = buffer.Buffer # actually implemented in pypy.interpreter.buffer + +def _buffer_setitem(space, buf, w_index, newstring): + start, stop, step, size = space.decode_index4(w_index, buf.getlength()) + if step == 0: # index only + if len(newstring) != 1: + msg = 'buffer[index]=x: x must be a single character' + raise OperationError(space.w_TypeError, space.wrap(msg)) + char = newstring[0] # annotator hint + buf.setitem(start, char) + elif step == 1: + if len(newstring) != size: + msg = "right operand length must match slice length" + raise OperationError(space.w_ValueError, space.wrap(msg)) + buf.setslice(start, newstring) + else: + raise OperationError(space.w_ValueError, + space.wrap("buffer object does not support" + " slicing with a step")) + + +class W_Buffer(W_Root): + """Implement the built-in 'buffer' type as a thin wrapper around + an interp-level buffer. 
+ """ + + def __init__(self, buf): + self.buf = buf + + def buffer_w(self, space): + return self.buf + + @staticmethod + @unwrap_spec(offset=int, size=int) + def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): + if space.isinstance_w(w_object, space.w_unicode): + # unicode objects support the old buffer interface + # but not the new buffer interface (change in python 2.7) + from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE + unistr = space.unicode_w(w_object) + builder = StringBuilder(len(unistr) * UNICODE_SIZE) + for unich in unistr: + pack_unichar(unich, builder) + from pypy.interpreter.buffer import StringBuffer + buf = StringBuffer(builder.build()) + else: + buf = space.buffer_w(w_object) + + if offset == 0 and size == -1: + return W_Buffer(buf) + # handle buffer slices + if offset < 0: + raise OperationError(space.w_ValueError, + space.wrap("offset must be zero or positive")) + if size < -1: + raise OperationError(space.w_ValueError, + space.wrap("size must be zero or positive")) + if isinstance(buf, buffer.RWBuffer): + buf = buffer.RWSubBuffer(buf, offset, size) + else: + buf = buffer.SubBuffer(buf, offset, size) + return W_Buffer(buf) + + def descr_len(self, space): + return space.wrap(self.buf.getlength()) + + def descr_getitem(self, space, w_index): + start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + if step == 0: # index only + return space.wrap(self.buf.getitem(start)) + res = self.buf.getslice(start, stop, step, size) + return space.wrap(res) + + @unwrap_spec(newstring='bufferstr') + def descr_setitem(self, space, w_index, newstring): + if not isinstance(self.buf, buffer.RWBuffer): + raise OperationError(space.w_TypeError, + space.wrap("buffer is read-only")) + _buffer_setitem(space, self.buf, w_index, newstring) + + def descr_str(self, space): + return space.wrap(self.buf.as_str()) + + @unwrap_spec(other='bufferstr') + def descr_add(self, space, other): + return space.wrap(self.buf.as_str() + other) + + def _make_descr__cmp(name): + def descr__cmp(self, space, w_other): + if not isinstance(w_other, W_Buffer): + return space.w_NotImplemented + # xxx not the most efficient implementation + str1 = self.buf.as_str() + str2 = w_other.buf.as_str() + return space.wrap(getattr(operator, name)(str1, str2)) + descr__cmp.func_name = name + return descr__cmp + + descr_eq = _make_descr__cmp('eq') + descr_ne = _make_descr__cmp('ne') + descr_lt = _make_descr__cmp('lt') + descr_le = _make_descr__cmp('le') + descr_gt = _make_descr__cmp('gt') + descr_ge = _make_descr__cmp('ge') + + def descr_hash(self, space): + return space.wrap(compute_hash(self.buf.as_str())) + + def descr_mul(self, space, w_times): + # xxx not the most efficient implementation + w_string = space.wrap(self.buf.as_str()) + # use the __mul__ method instead of space.mul() so that we + # return NotImplemented instead of raising a TypeError + return space.call_method(w_string, '__mul__', w_times) + + def descr_repr(self, space): + if isinstance(self.buf, buffer.RWBuffer): + info = 'read-write buffer' + else: + info = 'read-only buffer' + addrstring = self.getaddrstring(space) + + return space.wrap("<%s for 0x%s, size %d>" % + (info, addrstring, self.buf.getlength())) + +W_Buffer.typedef = TypeDef( + "buffer", + __doc__ = """\ +buffer(object [, offset[, size]]) + +Create a new buffer object which references the given object. +The buffer will reference a slice of the target object from the +start of the object (or at the specified offset). 
The slice will +extend to the end of the target object (or with the specified size). +""", + __new__ = interp2app(W_Buffer.descr_new_buffer), + __len__ = interp2app(W_Buffer.descr_len), + __getitem__ = interp2app(W_Buffer.descr_getitem), + __setitem__ = interp2app(W_Buffer.descr_setitem), + __str__ = interp2app(W_Buffer.descr_str), + __add__ = interp2app(W_Buffer.descr_add), + __eq__ = interp2app(W_Buffer.descr_eq), + __ne__ = interp2app(W_Buffer.descr_ne), + __lt__ = interp2app(W_Buffer.descr_lt), + __le__ = interp2app(W_Buffer.descr_le), + __gt__ = interp2app(W_Buffer.descr_gt), + __ge__ = interp2app(W_Buffer.descr_ge), + __hash__ = interp2app(W_Buffer.descr_hash), + __mul__ = interp2app(W_Buffer.descr_mul), + __rmul__ = interp2app(W_Buffer.descr_mul), + __repr__ = interp2app(W_Buffer.descr_repr), +) +W_Buffer.typedef.acceptable_as_base_class = False class W_MemoryView(W_Root): @@ -17,9 +171,16 @@ """ def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) self.buf = buf + def buffer_w(self, space): + return self.buf + + @staticmethod + def descr_new_memoryview(space, w_subtype, w_object): + w_memoryview = W_MemoryView(space.buffer_w(w_object)) + return w_memoryview + def _make_descr__cmp(name): def descr__cmp(self, space, w_other): if isinstance(w_other, W_MemoryView): @@ -29,14 +190,14 @@ return space.wrap(getattr(operator, name)(str1, str2)) try: - w_buf = space.buffer(w_other) + buf = space.buffer_w(w_other) except OperationError, e: if not e.match(space, space.w_TypeError): raise return space.w_NotImplemented else: str1 = self.as_str() - str2 = space.buffer_w(w_buf).as_str() + str2 = buf.as_str() return space.wrap(getattr(operator, name)(str1, str2)) descr__cmp.func_name = name return descr__cmp @@ -102,15 +263,13 @@ @unwrap_spec(newstring='bufferstr') def descr_setitem(self, space, w_index, newstring): - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf.descr_setitem(space, w_index, newstring) - else: + if not isinstance(self.buf, buffer.RWBuffer): raise OperationError(space.w_TypeError, space.wrap("cannot modify read-only memory")) + _buffer_setitem(space, self.buf, w_index, newstring) def descr_len(self, space): - return self.buf.descr_len(space) + return space.wrap(self.buf.getlength()) def w_get_format(self, space): return space.wrap("B") @@ -134,18 +293,12 @@ # I've never seen anyone filling this field return space.w_None - -def descr_new(space, w_subtype, w_object): - memoryview = W_MemoryView(space.buffer(w_object)) - return space.wrap(memoryview) - W_MemoryView.typedef = TypeDef( "memoryview", __doc__ = """\ Create a new memoryview object which references the given object. 
""", - __new__ = interp2app(descr_new), - __buffer__ = interp2app(W_MemoryView.descr_buffer), + __new__ = interp2app(W_MemoryView.descr_new_memoryview), __eq__ = interp2app(W_MemoryView.descr_eq), __ge__ = interp2app(W_MemoryView.descr_ge), __getitem__ = interp2app(W_MemoryView.descr_getitem), diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -4,6 +4,7 @@ from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.gateway import unwrap_spec +from pypy.module.__builtin__.interp_memoryview import W_Buffer class ByteBuffer(RWBuffer): @@ -23,4 +24,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return space.wrap(ByteBuffer(length)) + return W_Buffer(ByteBuffer(length)) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -4,6 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi @@ -39,38 +40,19 @@ copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) -class MiniBuffer(W_Root): - # a different subclass of W_Root for the MiniBuffer, because we - # want a slightly different (simplified) API at the level of Python. +# Override the typedef to narrow down the interface that's exposed to app-level +class MiniBuffer(W_Buffer): def __init__(self, buffer, keepalive=None): - self.buffer = buffer + W_Buffer.__init__(self, buffer) self.keepalive = keepalive - def descr_len(self, space): - return self.buffer.descr_len(space) - - def descr_getitem(self, space, w_index): - return self.buffer.descr_getitem(space, w_index) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - self.buffer.descr_setitem(space, w_index, newstring) - - def descr__buffer__(self, space): - return self.buffer.descr__buffer__(space) - - def descr_str(self, space): - return space.wrap(self.buffer.as_str()) - - MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), - __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), __str__ = interp2app(MiniBuffer.descr_str), ) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -5,6 +5,7 @@ TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.buffer import RWBuffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -511,7 +512,7 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) - w_buf = space.wrap(RawBuffer(buffer, start, length)) + w_buf = W_Buffer(RawBuffer(buffer, start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) diff --git 
a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -207,7 +207,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), free = interp2app(W_ArrayInstance.free), @@ -232,7 +231,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), byptr = interp2app(W_ArrayInstance.byptr), diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -352,9 +352,9 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def descr_buffer(self, space): + def buffer_w(self, space): from pypy.module._rawffi.buffer import RawFFIBuffer - return space.wrap(RawFFIBuffer(self)) + return RawFFIBuffer(self) def getrawsize(self): raise NotImplementedError("abstract base class") diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -364,7 +364,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), free = interp2app(W_StructureInstance.free), shape = interp_attrproperty('shape', W_StructureInstance), @@ -387,7 +386,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), shape = interp_attrproperty('shape', W_StructureInstance), byptr = interp2app(W_StructureInstance.byptr), diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -132,6 +132,9 @@ self.len = 0 self.allocated = 0 + def buffer_w(self, space): + return ArrayBuffer(self) + def descr_append(self, space, w_x): """ append(x) @@ -462,9 +465,6 @@ # Misc methods - def descr_buffer(self, space): - return space.wrap(ArrayBuffer(self)) - def descr_repr(self, space): if self.len == 0: return space.wrap("array('%s')" % self.typecode) @@ -508,7 +508,6 @@ __radd__ = interp2app(W_ArrayBase.descr_radd), __rmul__ = interp2app(W_ArrayBase.descr_rmul), - __buffer__ = interp2app(W_ArrayBase.descr_buffer), __repr__ = interp2app(W_ArrayBase.descr_repr), itemsize = GetSetProperty(descr_itemsize), diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -3,8 +3,9 @@ cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref 
-from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer +from pypy.interpreter.buffer import StringBuffer, SubBuffer from pypy.interpreter.error import OperationError +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.module.array.interp_array import ArrayBuffer @@ -24,7 +25,7 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.gettypefor(Buffer).instancetypedef, + make_typedescr(space.gettypefor(W_Buffer).instancetypedef, basestruct=PyBufferObject.TO, attach=buffer_attach, dealloc=buffer_dealloc, @@ -39,23 +40,26 @@ rffi.setintfield(py_buf, 'c_b_readonly', 1) rffi.setintfield(py_buf, 'c_b_hash', -1) - if isinstance(w_obj, SubBuffer): - py_buf.c_b_offset = w_obj.offset - w_obj = w_obj.buffer + assert isinstance(w_obj, W_Buffer) + buf = w_obj.buf - # If w_obj already allocated a fixed buffer, use it, and keep a - # reference to w_obj. + if isinstance(buf, SubBuffer): + py_buf.c_b_offset = buf.offset + buf = buf.buffer + + # If buf already allocated a fixed buffer, use it, and keep a + # reference to buf. # Otherwise, b_base stays NULL, and we own the b_ptr. - if isinstance(w_obj, StringBuffer): + if isinstance(buf, StringBuffer): py_buf.c_b_base = lltype.nullptr(PyObject.TO) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value)) - py_buf.c_b_size = w_obj.getlength() - elif isinstance(w_obj, ArrayBuffer): - w_base = w_obj.array + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(buf.value)) + py_buf.c_b_size = buf.getlength() + elif isinstance(buf, ArrayBuffer): + w_base = buf.array py_buf.c_b_base = make_ref(space, w_base) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.array._charbuf_start()) - py_buf.c_b_size = w_obj.getlength() + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, buf.array._charbuf_start()) + py_buf.c_b_size = buf.getlength() else: raise OperationError(space.w_NotImplementedError, space.wrap( "buffer flavor not supported")) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -14,8 +14,9 @@ from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer as W_Buffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize @@ -228,7 +229,7 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) -class CPyBuffer(W_Buffer): +class CPyBuffer(Buffer): # Similar to Py_buffer def __init__(self, ptr, size, w_obj): @@ -249,7 +250,7 @@ size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.wrap(CPyBuffer(ptr[0], size, w_self)) + return W_Buffer(CPyBuffer(ptr[0], size, w_self)) def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -28,7 +28,7 @@ PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, 
slotdefs_for_wrappers, get_slot_tp_function) -from pypy.interpreter.buffer import Buffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.interpreter.error import OperationError from rpython.rlib.rstring import rsplit from rpython.rlib.objectmodel import specialize @@ -509,7 +509,7 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) - if space.is_w(w_type, space.gettypefor(Buffer)): + if space.is_w(w_type, space.gettypefor(W_Buffer)): setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -342,8 +342,8 @@ def descr_copy(self, space): return self.convert_to(space, self.get_dtype(space)) - def descr_buffer(self, space): - return self.descr_ravel(space).descr_get_data(space) + def buffer_w(self, space): + return self.descr_ravel(space).buffer_w(space) def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) @@ -553,7 +553,6 @@ __nonzero__ = interp2app(W_GenericBox.descr_nonzero), __oct__ = interp2app(W_GenericBox.descr_oct), __hex__ = interp2app(W_GenericBox.descr_hex), - __buffer__ = interp2app(W_GenericBox.descr_buffer), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -7,6 +7,7 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops, loop, \ support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -602,8 +603,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ctypes not implemented yet")) + def buffer_w(self, space): + return self.implementation.get_buffer(space) + def descr_get_data(self, space): - return self.implementation.get_buffer(space) + return W_Buffer(self.buffer_w(space)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -1259,7 +1263,6 @@ __float__ = interp2app(W_NDimArray.descr_float), __hex__ = interp2app(W_NDimArray.descr_hex), __oct__ = interp2app(W_NDimArray.descr_oct), - __buffer__ = interp2app(W_NDimArray.descr_get_data), __index__ = interp2app(W_NDimArray.descr_index), __pos__ = interp2app(W_NDimArray.descr_pos), diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -17,6 +17,10 @@ self.space = space self.mmap = mmap_obj + def buffer_w(self, space): + self.check_valid() + return MMapBuffer(self.space, self.mmap) + def close(self): self.mmap.close() @@ -196,10 +200,6 @@ self.mmap.setitem(start, value[i]) start += step - def descr_buffer(self): - self.check_valid() - return self.space.wrap(MMapBuffer(self.space, self.mmap)) - if rmmap._POSIX: @unwrap_spec(fileno=int, length=int, flags=int, @@ -256,7 +256,6 @@ __len__ = interp2app(W_MMap.__len__), __getitem__ = interp2app(W_MMap.descr_getitem), __setitem__ = interp2app(W_MMap.descr_setitem), - __buffer__ = interp2app(W_MMap.descr_buffer), ) constants = rmmap.constants diff --git 
a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -429,7 +429,7 @@ self.handlers = [None] * NB_HANDLERS - self.buffer_w = None + self.buffer_w_ = None self.buffer_size = 8192 self.buffer_used = 0 self.w_character_data_handler = None @@ -541,7 +541,7 @@ def buffer_string(self, space, w_string, length): ll_length = rffi.cast(lltype.Signed, length) - if self.buffer_w is not None: + if self.buffer_w_ is not None: if self.buffer_used + ll_length > self.buffer_size: self.flush_character_buffer(space) # handler might have changed; drop the rest on the floor @@ -549,11 +549,11 @@ if self.w_character_data_handler is None: return True if ll_length <= self.buffer_size: - self.buffer_w.append(w_string) + self.buffer_w_.append(w_string) self.buffer_used += ll_length return True else: - self.buffer_w = [] + self.buffer_w_ = [] self.buffer_used = 0 return False @@ -685,12 +685,12 @@ return space.wrap(parser) def flush_character_buffer(self, space): - if not self.buffer_w: + if not self.buffer_w_: return w_data = space.call_function( space.getattr(space.wrap(''), space.wrap('join')), - space.newlist(self.buffer_w)) - self.buffer_w = [] + space.newlist(self.buffer_w_)) + self.buffer_w_ = [] self.buffer_used = 0 if self.w_character_data_handler: @@ -735,14 +735,14 @@ self.buffer_size = value def get_buffer_text(self, space): - return space.wrap(self.buffer_w is not None) + return space.wrap(self.buffer_w_ is not None) def set_buffer_text(self, space, w_value): if space.is_true(w_value): - self.buffer_w = [] + self.buffer_w_ = [] self.buffer_used = 0 else: self.flush_character_buffer(space) - self.buffer_w = None + self.buffer_w_ = None def get_intern(self, space): if self.w_intern: diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -292,6 +292,11 @@ ec._py_repr = None return ec + def buffer_w(self, w_obj): + from pypy.interpreter.buffer import Buffer + is_root(w_obj) + return Buffer() + def unicode_from_object(self, w_obj): return w_some_obj() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -27,6 +27,9 @@ """representation for debugging purposes""" return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) + def buffer_w(w_self, space): + return BytearrayBuffer(w_self.data) + def _new(self, value): return W_BytearrayObject(_make_data(value)) @@ -295,9 +298,6 @@ def descr_iter(self, space): return space.newseqiter(self) - def descr_buffer(self, space): - return BytearrayBuffer(self.data) - def descr_inplace_add(self, space, w_other): if isinstance(w_other, W_BytearrayObject): self.data += w_other.data @@ -1011,7 +1011,6 @@ __init__ = interp2app(W_BytearrayObject.descr_init, doc=BytearrayDocstrings.__init__.__doc__), - __buffer__ = interp2app(W_BytearrayObject.descr_buffer), __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add, doc=BytearrayDocstrings.__iadd__.__doc__), diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -424,9 +424,6 @@ of the specified width. The string S is never truncated. 
""" - def descr_buffer(self, space): - pass - class W_BytesObject(W_AbstractBytesObject): import_from_mixin(StringMethods) @@ -445,6 +442,9 @@ def str_w(self, space): return self._value + def buffer_w(w_self, space): + return StringBuffer(w_self._value) + def listview_bytes(self): return _create_list_from_bytes(self._value) @@ -567,9 +567,6 @@ def descr_mod(self, space, w_values): return mod_format(space, self, w_values, do_unicode=False) - def descr_buffer(self, space): - return space.wrap(StringBuffer(self._value)) - def descr_eq(self, space, w_other): if space.config.objspace.std.withstrbuf: from pypy.objspace.std.strbufobject import W_StringBufferObject @@ -853,7 +850,6 @@ format = interpindirect2app(W_BytesObject.descr_format), __format__ = interpindirect2app(W_BytesObject.descr__format__), __mod__ = interpindirect2app(W_BytesObject.descr_mod), - __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), __getnewargs__ = interpindirect2app( W_AbstractBytesObject.descr_getnewargs), _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -696,7 +696,7 @@ else: # If object supports the buffer interface try: - w_buffer = space.buffer(w_value) + buf = space.buffer_w(w_value) except OperationError as e: if not e.match(space, space.w_TypeError): raise @@ -704,7 +704,6 @@ "int() argument must be a string or a number, " "not '%T'", w_value) else: - buf = space.interp_w(Buffer, w_buffer) value, w_longval = _string_to_int_or_long(space, w_value, buf.as_str()) ok = True diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -513,7 +513,7 @@ unicode_to_decimal_w(space, w_value)) else: try: - w_buffer = space.buffer(w_value) + buf = space.buffer_w(w_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -521,7 +521,6 @@ "long() argument must be a string or a number, " "not '%T'", w_value) else: - buf = space.interp_w(Buffer, w_buffer) return _string_to_w_long(space, w_longtype, w_value, buf.as_str()) else: diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -2,12 +2,12 @@ import py -from pypy.objspace.std.basestringtype import basestring_typedef -from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.bytesobject import (W_AbstractBytesObject, + W_BytesObject, StringBuffer) from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rstring import StringBuilder + class W_StringBufferObject(W_AbstractBytesObject): w_str = None @@ -36,6 +36,9 @@ def str_w(self, space): return self.force() + def buffer_w(self, space): + return StringBuffer(self.force()) + def descr_len(self, space): return space.wrap(self.length) From noreply at buildbot.pypy.org Tue Mar 18 16:48:25 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 18 Mar 2014 16:48:25 +0100 (CET) Subject: [pypy-commit] pypy default: Document branch. Message-ID: <20140318154825.DB0AC1D26D3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r70054:d80cbba4def2 Date: 2014-03-18 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/d80cbba4def2/ Log: Document branch. 
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -108,3 +108,9 @@ .. branch: virtual-raw-store-load Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. From noreply at buildbot.pypy.org Tue Mar 18 16:48:27 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 18 Mar 2014 16:48:27 +0100 (CET) Subject: [pypy-commit] pypy default: Change misleading comments: calling these wrappers 'thin' is a bit overstated. Message-ID: <20140318154827.126081D26D3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r70055:a90697f1fab2 Date: 2014-03-18 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/a90697f1fab2/ Log: Change misleading comments: calling these wrappers 'thin' is a bit overstated. diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -32,7 +32,7 @@ class W_Buffer(W_Root): - """Implement the built-in 'buffer' type as a thin wrapper around + """Implement the built-in 'buffer' type as a wrapper around an interp-level buffer. """ @@ -166,7 +166,7 @@ class W_MemoryView(W_Root): - """Implement the built-in 'memoryview' type as a thin wrapper around + """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. """ From noreply at buildbot.pypy.org Tue Mar 18 18:57:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 18:57:23 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup buffer changes Message-ID: <20140318175723.5E27D1C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70056:56dd1868523c Date: 2014-03-18 13:45 -0400 http://bitbucket.org/pypy/pypy/changeset/56dd1868523c/ Log: cleanup buffer changes diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -44,7 +44,7 @@ @staticmethod @unwrap_spec(offset=int, size=int) - def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): + def descr_new(space, w_subtype, w_object, offset=0, size=-1): if space.isinstance_w(w_object, space.w_unicode): # unicode objects support the old buffer interface # but not the new buffer interface (change in python 2.7) @@ -145,7 +145,7 @@ start of the object (or at the specified offset). The slice will extend to the end of the target object (or with the specified size). """, - __new__ = interp2app(W_Buffer.descr_new_buffer), + __new__ = interp2app(W_Buffer.descr_new), __len__ = interp2app(W_Buffer.descr_len), __getitem__ = interp2app(W_Buffer.descr_getitem), __setitem__ = interp2app(W_Buffer.descr_setitem), @@ -177,7 +177,7 @@ return self.buf @staticmethod - def descr_new_memoryview(space, w_subtype, w_object): + def descr_new(space, w_subtype, w_object): w_memoryview = W_MemoryView(space.buffer_w(w_object)) return w_memoryview @@ -298,7 +298,7 @@ __doc__ = """\ Create a new memoryview object which references the given object. 
""", - __new__ = interp2app(W_MemoryView.descr_new_memoryview), + __new__ = interp2app(W_MemoryView.descr_new), __eq__ = interp2app(W_MemoryView.descr_eq), __ge__ = interp2app(W_MemoryView.descr_ge), __getitem__ = interp2app(W_MemoryView.descr_getitem), diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -429,7 +429,7 @@ self.handlers = [None] * NB_HANDLERS - self.buffer_w_ = None + self.buffer = None self.buffer_size = 8192 self.buffer_used = 0 self.w_character_data_handler = None @@ -541,7 +541,7 @@ def buffer_string(self, space, w_string, length): ll_length = rffi.cast(lltype.Signed, length) - if self.buffer_w_ is not None: + if self.buffer is not None: if self.buffer_used + ll_length > self.buffer_size: self.flush_character_buffer(space) # handler might have changed; drop the rest on the floor @@ -549,11 +549,11 @@ if self.w_character_data_handler is None: return True if ll_length <= self.buffer_size: - self.buffer_w_.append(w_string) + self.buffer.append(w_string) self.buffer_used += ll_length return True else: - self.buffer_w_ = [] + self.buffer = [] self.buffer_used = 0 return False @@ -685,12 +685,12 @@ return space.wrap(parser) def flush_character_buffer(self, space): - if not self.buffer_w_: + if not self.buffer: return w_data = space.call_function( space.getattr(space.wrap(''), space.wrap('join')), - space.newlist(self.buffer_w_)) - self.buffer_w_ = [] + space.newlist(self.buffer)) + self.buffer = [] self.buffer_used = 0 if self.w_character_data_handler: @@ -735,14 +735,14 @@ self.buffer_size = value def get_buffer_text(self, space): - return space.wrap(self.buffer_w_ is not None) + return space.wrap(self.buffer is not None) def set_buffer_text(self, space, w_value): if space.is_true(w_value): - self.buffer_w_ = [] + self.buffer = [] self.buffer_used = 0 else: self.flush_character_buffer(space) - self.buffer_w_ = None + self.buffer = None def get_intern(self, space): if self.w_intern: From noreply at buildbot.pypy.org Tue Mar 18 18:57:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 18:57:24 +0100 (CET) Subject: [pypy-commit] pypy default: provide space.w_buffer Message-ID: <20140318175724.960C61C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70057:bf7b7094d768 Date: 2014-03-18 13:47 -0400 http://bitbucket.org/pypy/pypy/changeset/bf7b7094d768/ Log: provide space.w_buffer diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -198,8 +198,7 @@ w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) - from pypy.module.__builtin__.interp_memoryview import W_Buffer - if isinstance(w_result, W_Buffer): + if space.isinstance_w(w_result, space.w_buffer): return w_result.buf self._typed_unwrap_error(space, "buffer") diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -25,7 +25,7 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.gettypefor(W_Buffer).instancetypedef, + make_typedescr(space.w_buffer.instancetypedef, basestruct=PyBufferObject.TO, attach=buffer_attach, dealloc=buffer_dealloc, diff --git a/pypy/module/cpyext/typeobject.py 
b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -509,7 +509,7 @@ # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) - if space.is_w(w_type, space.gettypefor(W_Buffer)): + if space.is_w(w_type, space.w_buffer): setup_buffer_buffer_procs(space, pto) pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -63,6 +63,7 @@ from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject + from pypy.module.__builtin__.interp_memoryview import W_Buffer import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods @@ -82,6 +83,7 @@ self.pythontypes.append(intobject.W_IntObject.typedef) self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) + self.pythontypes.append(W_Buffer.typedef) # the set of implementation types self.typeorder = { From noreply at buildbot.pypy.org Tue Mar 18 19:25:27 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 18 Mar 2014 19:25:27 +0100 (CET) Subject: [pypy-commit] pypy default: Organize imports. Message-ID: <20140318182527.CBC0B1C00B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r70058:2d7d9db55899 Date: 2014-03-18 19:24 +0100 http://bitbucket.org/pypy/pypy/changeset/2d7d9db55899/ Log: Organize imports. diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -1,40 +1,36 @@ import os -import sys +from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rstring import rsplit +from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.annlowlevel import llhelper + from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.objspace.std.typeobject import W_TypeObject, find_best_base +from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import GetSetProperty +from pypy.module.__builtin__.abstractinst import abstract_issubclass_w +from pypy.module.cpyext import structmemberdefs from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - Py_TPFLAGS_HAVE_GETCHARBUFFER, - build_type_checkers, PyObjectFields) + Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers) +from pypy.module.cpyext.methodobject import ( + PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) +from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, - track_reference, RefcountState, borrow_from) -from pypy.interpreter.module import Module -from pypy.module.cpyext import structmemberdefs -from pypy.module.cpyext.modsupport import convert_method_defs + track_reference, RefcountState, borrow_from, Py_DecRef) +from pypy.module.cpyext.slotdefs import ( + slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State -from 
pypy.module.cpyext.methodobject import ( - PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) -from pypy.module.cpyext.pyobject import Py_IncRef, Py_DecRef, _Py_Dealloc from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) -from pypy.module.cpyext.slotdefs import ( - slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) -from pypy.module.__builtin__.interp_memoryview import W_Buffer -from pypy.interpreter.error import OperationError -from rpython.rlib.rstring import rsplit -from rpython.rlib.objectmodel import specialize -from pypy.module.__builtin__.abstractinst import abstract_issubclass_w -from pypy.module.__builtin__.interp_classobj import W_ClassObject -from rpython.rlib import jit +from pypy.objspace.std.typeobject import W_TypeObject, find_best_base + WARN_ABOUT_MISSING_SLOT_FUNCTIONS = False @@ -471,7 +467,6 @@ def type_alloc(space, w_metatype): - size = rffi.sizeof(PyHeapTypeObject) metatype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_metatype)) # Don't increase refcount for non-heaptypes if metatype: From noreply at buildbot.pypy.org Tue Mar 18 19:37:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 18 Mar 2014 19:37:42 +0100 (CET) Subject: [pypy-commit] pypy default: make test_load_error more resilient by improving the leak detection Message-ID: <20140318183742.B04651C00B9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70059:f777d0ca3f24 Date: 2014-03-18 11:36 -0700 http://bitbucket.org/pypy/pypy/changeset/f777d0ca3f24/ Log: make test_load_error more resilient by improving the leak detection diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -183,6 +183,19 @@ from rpython.rlib.clibffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) + def setup_method(self, meth): + freeze_refcnts(self) + + def teardown_method(self, meth): + self.cleanup_references(self.space) + # XXX: like AppTestCpythonExtensionBase.teardown_method: + # find out how to disable check_and_print_leaks() if the + # test failed + assert not self.check_and_print_leaks(), ( + "Test leaks or loses object(s). You should also check if " + "the test actually passed in the first place; if it failed " + "it is likely to reach this place.") + def test_load_error(self): import cpyext raises(ImportError, cpyext.load_module, "missing.file", "foo") @@ -358,13 +371,12 @@ for name in self.imported_module_names: self.unimport_module(name) self.cleanup_references(self.space) - if self.check_and_print_leaks(): - assert False, ( - "Test leaks or loses object(s). You should also check if " - "the test actually passed in the first place; if it failed " - "it is likely to reach this place.") - # XXX find out how to disable check_and_print_leaks() if the - # XXX test failed... + # XXX: find out how to disable check_and_print_leaks() if the + # test failed... + assert not self.check_and_print_leaks(), ( + "Test leaks or loses object(s). 
You should also check if " + "the test actually passed in the first place; if it failed " + "it is likely to reach this place.") class AppTestCpythonExtension(AppTestCpythonExtensionBase): From noreply at buildbot.pypy.org Tue Mar 18 19:48:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 19:48:41 +0100 (CET) Subject: [pypy-commit] pypy default: fix translation Message-ID: <20140318184841.677C61C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70060:131563225cdf Date: 2014-03-18 14:48 -0400 http://bitbucket.org/pypy/pypy/changeset/131563225cdf/ Log: fix translation diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -42,7 +42,6 @@ def buffer_w(self, space): return self.buf - @staticmethod @unwrap_spec(offset=int, size=int) def descr_new(space, w_subtype, w_object, offset=0, size=-1): if space.isinstance_w(w_object, space.w_unicode): @@ -145,7 +144,7 @@ start of the object (or at the specified offset). The slice will extend to the end of the target object (or with the specified size). """, - __new__ = interp2app(W_Buffer.descr_new), + __new__ = interp2app(W_Buffer.descr_new.im_func), __len__ = interp2app(W_Buffer.descr_len), __getitem__ = interp2app(W_Buffer.descr_getitem), __setitem__ = interp2app(W_Buffer.descr_setitem), @@ -176,7 +175,6 @@ def buffer_w(self, space): return self.buf - @staticmethod def descr_new(space, w_subtype, w_object): w_memoryview = W_MemoryView(space.buffer_w(w_object)) return w_memoryview @@ -298,7 +296,7 @@ __doc__ = """\ Create a new memoryview object which references the given object. """, - __new__ = interp2app(W_MemoryView.descr_new), + __new__ = interp2app(W_MemoryView.descr_new.im_func), __eq__ = interp2app(W_MemoryView.descr_eq), __ge__ = interp2app(W_MemoryView.descr_ge), __getitem__ = interp2app(W_MemoryView.descr_getitem), From noreply at buildbot.pypy.org Tue Mar 18 19:57:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 19:57:06 +0100 (CET) Subject: [pypy-commit] pypy default: seems these do need to be unique, even in separate classes Message-ID: <20140318185706.AFD861D26D3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70061:9d175a0294eb Date: 2014-03-18 14:56 -0400 http://bitbucket.org/pypy/pypy/changeset/9d175a0294eb/ Log: seems these do need to be unique, even in separate classes diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -42,8 +42,9 @@ def buffer_w(self, space): return self.buf + @staticmethod @unwrap_spec(offset=int, size=int) - def descr_new(space, w_subtype, w_object, offset=0, size=-1): + def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): if space.isinstance_w(w_object, space.w_unicode): # unicode objects support the old buffer interface # but not the new buffer interface (change in python 2.7) @@ -144,7 +145,7 @@ start of the object (or at the specified offset). The slice will extend to the end of the target object (or with the specified size). 
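The buffer() docstring quoted above describes the offset and size arguments; as a short illustration of that behaviour at app level (plain Python 2.x, example values made up):

    s = "hello world"
    buffer(s)[:]          # -> 'hello world'   (view over the whole object)
    buffer(s, 6)[:]       # -> 'world'         (offset: where the slice starts)
    buffer(s, 0, 5)[:]    # -> 'hello'         (size: how long the slice is)
    str(buffer(s, 6, 3))  # -> 'wor'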
""", - __new__ = interp2app(W_Buffer.descr_new.im_func), + __new__ = interp2app(W_Buffer.descr_new_buffer), __len__ = interp2app(W_Buffer.descr_len), __getitem__ = interp2app(W_Buffer.descr_getitem), __setitem__ = interp2app(W_Buffer.descr_setitem), @@ -175,9 +176,9 @@ def buffer_w(self, space): return self.buf - def descr_new(space, w_subtype, w_object): - w_memoryview = W_MemoryView(space.buffer_w(w_object)) - return w_memoryview + @staticmethod + def descr_new_memoryview(space, w_subtype, w_object): + return W_MemoryView(space.buffer_w(w_object)) def _make_descr__cmp(name): def descr__cmp(self, space, w_other): @@ -296,7 +297,7 @@ __doc__ = """\ Create a new memoryview object which references the given object. """, - __new__ = interp2app(W_MemoryView.descr_new.im_func), + __new__ = interp2app(W_MemoryView.descr_new_memoryview), __eq__ = interp2app(W_MemoryView.descr_eq), __ge__ = interp2app(W_MemoryView.descr_ge), __getitem__ = interp2app(W_MemoryView.descr_getitem), From noreply at buildbot.pypy.org Tue Mar 18 20:25:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 20:25:34 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix buffer init Message-ID: <20140318192534.697C71D2824@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70062:3c1786c06a69 Date: 2014-03-18 15:24 -0400 http://bitbucket.org/pypy/pypy/changeset/3c1786c06a69/ Log: test/fix buffer init diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -195,11 +195,6 @@ return None def buffer_w(self, space): - w_impl = space.lookup(self, '__buffer__') - if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_buffer): - return w_result.buf self._typed_unwrap_error(space, "buffer") def str_w(self, space): diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -4,6 +4,12 @@ class AppTestBuffer: spaceconfig = dict(usemodules=['array']) + def test_init(self): + class A(object): + def __buffer__(self): + return buffer('123') + raises(TypeError, buffer, A()) + def test_unicode_buffer(self): import sys b = buffer(u"ab") From noreply at buildbot.pypy.org Tue Mar 18 22:27:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 22:27:22 +0100 (CET) Subject: [pypy-commit] pypy default: more buffer cleanups Message-ID: <20140318212722.04DA91C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70063:922c7ee9e9d2 Date: 2014-03-18 17:15 -0400 http://bitbucket.org/pypy/pypy/changeset/922c7ee9e9d2/ Log: more buffer cleanups diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -171,7 +171,7 @@ # very inconsisten on CPython. In PyPy, memoryview supports # the buffer interface, and thus the following comparison # succeeds. 
See also the comment in - # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer # # Comparison with objects which don't support the buffer API self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,12 +1,10 @@ import py -from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) class TestBuffer: - def test_buffer_w(self): space = self.space w_hello = space.wrap('hello world') @@ -14,14 +12,14 @@ assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(W_Buffer(buf)) is buf + assert space.buffer_w(space.newbuffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(W_Buffer(space.buffer_w(w_hello))) == 'hello world' + assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) def test_file_write(self): space = self.space - w_buffer = W_Buffer(space.buffer_w(space.wrap('hello world'))) + w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') @@ -42,4 +40,4 @@ space.bufferstr_w, space.wrap(u'\xe9')) -# Note: some app-level tests for buffer are in module/__builtin__/test/. +# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -33,16 +33,11 @@ interpleveldefs = { # constants + '__debug__' : '(space.w_True)', # XXX 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', - '__debug__' : '(space.w_True)', # XXX - 'type' : '(space.w_type)', - 'object' : '(space.w_object)', 'bytes' : '(space.w_str)', - 'unicode' : '(space.w_unicode)', - 'buffer' : 'interp_memoryview.W_Buffer', - 'memoryview' : 'interp_memoryview.W_MemoryView', 'file' : 'state.get(space).w_file', 'open' : 'state.get(space).w_file', diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -24,6 +24,17 @@ else: cls.w_safe_runtimerror = cls.space.wrap(sys.version_info < (2, 6)) + def test_builtin_names(self): + import __builtin__ + assert __builtin__.None is None + assert __builtin__.False is False + assert __builtin__.True is True + + assert __builtin__.buffer is buffer + assert __builtin__.bytes is str + assert __builtin__.dict is dict + assert __builtin__.memoryview is memoryview + def test_bytes_alias(self): assert bytes is str assert isinstance(eval("b'hi'"), str) diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -4,11 +4,9 @@ from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.gateway import unwrap_spec -from pypy.module.__builtin__.interp_memoryview import W_Buffer class ByteBuffer(RWBuffer): - def __init__(self, len): self.data 
= ['\x00'] * len @@ -24,4 +22,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return W_Buffer(ByteBuffer(length)) + return space.newbuffer(ByteBuffer(length)) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -4,7 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.module.__builtin__.interp_memoryview import W_Buffer +from pypy.objspace.std.memoryview import W_Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -5,7 +5,6 @@ TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.buffer import RWBuffer -from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -512,7 +511,7 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) - w_buf = W_Buffer(RawBuffer(buffer, start, length)) + w_buf = space.newbuffer(RawBuffer(buffer, start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,10 +22,10 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod +from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject -from pypy.module.__builtin__.interp_memoryview import W_MemoryView from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -1,12 +1,12 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.interpreter.buffer import StringBuffer, SubBuffer +from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref -from pypy.interpreter.buffer import StringBuffer, SubBuffer -from pypy.interpreter.error import OperationError -from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.module.array.interp_array import ArrayBuffer +from pypy.objspace.std.memoryview import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -14,7 +14,6 @@ from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from 
pypy.module.cpyext.state import State -from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments @@ -250,7 +249,7 @@ size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return W_Buffer(CPyBuffer(ptr[0], size, w_self)) + return space.newbuffer(CPyBuffer(ptr[0], size, w_self)) def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -7,7 +7,6 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name -from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops, loop, \ support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -607,7 +606,7 @@ return self.implementation.get_buffer(space) def descr_get_data(self, space): - return W_Buffer(self.buffer_w(space)) + return space.newbuffer(self.buffer_w(space)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/objspace/std/memoryview.py rename from pypy/module/__builtin__/interp_memoryview.py rename to pypy/objspace/std/memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -37,6 +37,7 @@ """ def __init__(self, buf): + assert isinstance(buf, buffer.Buffer) self.buf = buf def buffer_w(self, space): @@ -171,6 +172,7 @@ """ def __init__(self, buf): + assert isinstance(buf, buffer.Buffer) self.buf = buf def buffer_w(self, space): diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -63,7 +63,7 @@ from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject - from pypy.module.__builtin__.interp_memoryview import W_Buffer + from pypy.objspace.std.memoryview import W_Buffer, W_MemoryView import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods @@ -84,6 +84,7 @@ self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) self.pythontypes.append(W_Buffer.typedef) + self.pythontypes.append(W_MemoryView.typedef) # the set of implementation types self.typeorder = { diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -24,6 +24,7 @@ from pypy.objspace.std.iterobject import W_AbstractSeqIterObject from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.longobject import W_LongObject, newlong +from pypy.objspace.std.memoryview import W_Buffer from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.iterobject import W_SeqIterObject @@ -314,6 +315,9 @@ def newseqiter(self, w_obj): return W_SeqIterObject(w_obj) + def newbuffer(self, w_obj): + return W_Buffer(w_obj) + def type(self, w_obj): 
jit.promote(w_obj.__class__) return w_obj.getclass(self) diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/objspace/std/test/test_memoryview.py rename from pypy/module/__builtin__/test/test_buffer.py rename to pypy/objspace/std/test/test_memoryview.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/objspace/std/test/test_memoryview.py @@ -1,6 +1,3 @@ -"""Tests some behaviour of the buffer type that is not tested in -lib-python/2.5.2/test/test_types.py where the stdlib buffer tests live.""" - class AppTestBuffer: spaceconfig = dict(usemodules=['array']) From noreply at buildbot.pypy.org Tue Mar 18 22:32:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 22:32:50 +0100 (CET) Subject: [pypy-commit] pypy default: fake objspace needs newbuffer also Message-ID: <20140318213250.588641C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70064:95ca8c340008 Date: 2014-03-18 17:32 -0400 http://bitbucket.org/pypy/pypy/changeset/95ca8c340008/ Log: fake objspace needs newbuffer also diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -168,6 +168,9 @@ def newseqiter(self, x): return w_some_obj() + def newbuffer(self, x): + return w_some_obj() + def marshal_w(self, w_obj): "NOT_RPYTHON" raise NotImplementedError From noreply at buildbot.pypy.org Tue Mar 18 22:51:29 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 18 Mar 2014 22:51:29 +0100 (CET) Subject: [pypy-commit] buildbot default: add SalsaSalsa_own Message-ID: <20140318215129.781B21D2808@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r913:8fd3d5a743e3 Date: 2014-03-18 23:51 +0200 http://bitbucket.org/pypy/buildbot/changeset/8fd3d5a743e3/ Log: add SalsaSalsa_own diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -385,7 +385,7 @@ 'category' : 'mac64', }, {"name": WIN32, - "slavenames": ["aurora", "SalsaSalsa"], + "slavenames": ["aurora", "SalsaSalsa_own"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, "locks": [WinSlaveLock.access('counting')], From noreply at buildbot.pypy.org Tue Mar 18 22:55:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 18 Mar 2014 22:55:33 +0100 (CET) Subject: [pypy-commit] buildbot default: Backed out changeset: 8fd3d5a743e3 Message-ID: <20140318215533.688041D2818@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r914:aaf1be5bce44 Date: 2014-03-18 23:55 +0200 http://bitbucket.org/pypy/buildbot/changeset/aaf1be5bce44/ Log: Backed out changeset: 8fd3d5a743e3 diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -385,7 +385,7 @@ 'category' : 'mac64', }, {"name": WIN32, - "slavenames": ["aurora", "SalsaSalsa_own"], + "slavenames": ["aurora", "SalsaSalsa"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, "locks": [WinSlaveLock.access('counting')], From noreply at buildbot.pypy.org Tue Mar 18 23:08:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 23:08:32 +0100 (CET) Subject: [pypy-commit] pypy default: kill unused StringLikeBuffer Message-ID: <20140318220832.D1A901D2818@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70065:b0ba9ef7bb1d Date: 2014-03-18 18:07 -0400 http://bitbucket.org/pypy/pypy/changeset/b0ba9ef7bb1d/ Log: kill unused StringLikeBuffer diff 
--git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -7,8 +7,7 @@ class Buffer(object): """Abstract base class for buffers.""" - - __slots__ = () # no extra slot here + __slots__ = [] def getlength(self): raise NotImplementedError @@ -29,14 +28,13 @@ def get_raw_address(self): raise ValueError("no raw buffer") - def is_writable(self): return False + class RWBuffer(Buffer): """Abstract base class for read-write buffers.""" - - __slots__ = () # no extra slot here + __slots__ = [] def is_writable(self): return True @@ -51,10 +49,8 @@ self.setitem(start + i, string[i]) - -# ____________________________________________________________ - class StringBuffer(Buffer): + __slots__ = ['value'] def __init__(self, value): self.value = value @@ -76,42 +72,11 @@ return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) - -class StringLikeBuffer(Buffer): - """For app-level objects that already have a string-like interface - with __len__ and a __getitem__ that returns characters or (with - slicing) substrings.""" - # XXX this is inefficient, it should only be used temporarily - - def __init__(self, space, w_obj): - self.space = space - self.w_obj = w_obj - - def getlength(self): - space = self.space - return space.len_w(self.w_obj) - - def getitem(self, index): - space = self.space - s = space.str_w(space.getitem(self.w_obj, space.wrap(index))) - if len(s) != 1: - raise OperationError(space.w_ValueError, - space.wrap("character expected, got string")) - char = s[0] # annotator hint - return char - - def getslice(self, start, stop, step, size): - space = self.space - if step != 1: - raise OperationError(space.w_ValueError, space.wrap( - "buffer object does not support slicing with a step")) - s = space.str_w(space.getslice(self.w_obj, space.wrap(start), - space.wrap(stop))) - return s - # ____________________________________________________________ class SubBufferMixin(object): + _attrs_ = ['buffer', 'offset', 'size'] + def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -135,9 +100,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) + class SubBuffer(Buffer): import_from_mixin(SubBufferMixin) + class RWSubBuffer(RWBuffer): import_from_mixin(SubBufferMixin) From noreply at buildbot.pypy.org Tue Mar 18 23:13:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 18 Mar 2014 23:13:42 +0100 (CET) Subject: [pypy-commit] pypy default: fix compilation error that caused msvc to follow mingw32 compile path, with no difference in final error mapping Message-ID: <20140318221342.AF3021C0166@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70066:f2d5c5bf7675 Date: 2014-03-18 23:01 +0200 http://bitbucket.org/pypy/pypy/changeset/f2d5c5bf7675/ Log: fix compilation error that caused msvc to follow mingw32 compile path, with no difference in final error mapping diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -169,6 +169,7 @@ cfile = udir.join('dosmaperr.c') cfile.write(r''' #include + #include #include #ifdef __GNUC__ #define _dosmaperr mingw_dosmaperr From noreply at buildbot.pypy.org Tue Mar 18 23:22:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 18 Mar 2014 23:22:21 +0100 (CET) Subject: [pypy-commit] pypy default: sanity check on platform when swallowing this exception Message-ID: 
<20140318222221.3A92D1C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70067:90f48765ab97 Date: 2014-03-18 15:21 -0700 http://bitbucket.org/pypy/pypy/changeset/90f48765ab97/ Log: sanity check on platform when swallowing this exception diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -170,7 +170,7 @@ cfile.write(r''' #include #include - #include + #include #ifdef __GNUC__ #define _dosmaperr mingw_dosmaperr #endif @@ -197,6 +197,7 @@ standalone=True) except (CompilationError, WindowsError): # Fallback for the mingw32 compiler + assert static_platform.name == 'mingw32' errors = { 2: 2, 3: 2, 4: 24, 5: 13, 6: 9, 7: 12, 8: 12, 9: 12, 10: 7, 11: 8, 15: 2, 16: 13, 17: 18, 18: 2, 19: 13, 20: 13, 21: 13, From noreply at buildbot.pypy.org Tue Mar 18 23:24:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 18 Mar 2014 23:24:17 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140318222417.B44D81C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70068:82be71463fe1 Date: 2014-03-18 15:15 -0700 http://bitbucket.org/pypy/pypy/changeset/82be71463fe1/ Log: merge default diff too long, truncating to 2000 out of 3534 lines diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -38,6 +38,7 @@ if sys.version_info[0] >= 3: StandardError = Exception + cmp = lambda x, y: (x > y) - (x < y) long = int xrange = range basestring = unicode = str diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -105,3 +105,12 @@ .. branch: stdlib-2.7.6 Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. 
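The split that this whatsnew entry describes is easier to see as a skeleton. What follows is only a rough sketch of the resulting shape in plain Python, not the real RPython classes (those appear in the diffs above and carry many more methods):

    class Buffer(object):                # interp-level: raw byte access only
        def getlength(self):
            raise NotImplementedError
        def getitem(self, index):
            raise NotImplementedError

    class StringBuffer(Buffer):          # a concrete interp-level buffer over a str
        def __init__(self, value):
            self.value = value
        def getlength(self):
            return len(self.value)
        def getitem(self, index):
            return self.value[index]

    class W_Buffer(object):              # app-level 'buffer' type, wraps a Buffer
        def __init__(self, buf):
            self.buf = buf               # the interp-level Buffer it exposes
        def buffer_w(self, space):
            return self.buf              # hands the interp-level object back out

Interp-level code passes Buffer objects around directly; the app-level buffer and memoryview objects are small wrappers that expose the same data through buffer_w().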
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -193,6 +193,14 @@ def immutable_unique_id(self, space): return None + def buffer_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.buf + self._typed_unwrap_error(space, "buffer") + def bytes_w(self, space): self._typed_unwrap_error(space, "bytes") @@ -436,14 +444,12 @@ def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') - try: - w_mod = self.getitem(w_modules, w_name) - except OperationError, e: - if not e.match(self, self.w_KeyError): - raise - else: - if not force_init: - return w_mod + if not force_init: + try: + return self.getitem(w_modules, w_name) + except OperationError, e: + if not e.match(self, self.w_KeyError): + raise # If the module is a builtin but not yet imported, # retrieve it and initialize it @@ -454,13 +460,13 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # Add the module to sys.modules - self.setitem(w_modules, w_name, w_mod) - # And initialize it from pypy.interpreter.module import Module if isinstance(w_mod, Module): w_mod.init(self) + + # Add the module to sys.modules + self.setitem(w_modules, w_name, w_mod) return w_mod def get_builtinmodule_to_install(self): @@ -1307,10 +1313,7 @@ 'to unsigned int')) def buffer_w(self, w_obj): - # returns a Buffer instance - from pypy.interpreter.buffer import Buffer - w_buffer = self.buffer(w_obj) - return self.interp_w(Buffer, w_buffer) + return w_obj.buffer_w(self) def rwbuffer_w(self, w_obj): # returns a RWBuffer instance @@ -1727,7 +1730,6 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,30 +1,12 @@ """ Buffer protocol support. """ +from pypy.interpreter.error import OperationError +from rpython.rlib.objectmodel import import_from_mixin -# The implementation of the buffer protocol. The basic idea is that we -# can ask any app-level object for a 'buffer' view on it, by calling its -# __buffer__() special method. It should return a wrapped instance of a -# subclass of the Buffer class defined below. Note that __buffer__() is -# a PyPy-only extension to the Python language, made necessary by the -# fact that it's not natural in PyPy to hack an interp-level-only -# interface. -# In normal usage, the convenience method space.buffer_w() should be -# used to get directly a Buffer instance. Doing so also gives you for -# free the typecheck that __buffer__() really returned a wrapped Buffer. 
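The comment being deleted just above documents the pre-refactoring convention. Roughly, and only as a sketch of the usage it describes (the helper name and file handling here are invented, not PyPy code):

    # interp-level side: always go through space.buffer_w(), which also gives
    # the typecheck that a real Buffer came back
    def write_all(space, w_obj, f):
        buf = space.buffer_w(w_obj)
        f.write(buf.as_str())         # flatten the buffer into a plain string

    # app-level side: the PyPy-only __buffer__() hook the comment mentions
    class MyData(object):
        def __init__(self, raw):
            self.raw = raw
        def __buffer__(self):
            return buffer(self.raw)   # delegate to a buffer over the raw str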
- -import operator -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash, import_from_mixin -from rpython.rlib.rstring import StringBuilder - - -class Buffer(W_Root): - """Abstract base class for memory views.""" +class Buffer(object): + """Abstract base class for buffers.""" __slots__ = () # no extra slot here @@ -47,94 +29,12 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): return False - # __________ app-level support __________ - - def descr_len(self, space): - return space.wrap(self.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - return space.wrapbytes(self.getitem(start)) - res = self.getslice(start, stop, step, size) - return space.wrapbytes(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self, RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - if len(newstring) != 1: - msg = 'buffer[index]=x: x must be a single character' - raise OperationError(space.w_TypeError, space.wrap(msg)) - char = newstring[0] # annotator hint - self.setitem(start, char) - elif step == 1: - if len(newstring) != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_ValueError, space.wrap(msg)) - self.setslice(start, newstring) - else: - raise OperationError(space.w_ValueError, - space.wrap("buffer object does not support" - " slicing with a step")) - - def descr__buffer__(self, space): - return space.wrap(self) - - def descr_str(self, space): - return space.wrap(self.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrapbytes(self.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrapbytes(self.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self, RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.getlength())) - - class RWBuffer(Buffer): - """Abstract base class for read-write memory views.""" + """Abstract base class for read-write buffers.""" __slots__ = () # no extra slot here @@ -151,72 +51,6 @@ self.setitem(start + i, 
string[i]) - at unwrap_spec(offset=int, size=int) -def descr_buffer__new__(space, w_subtype, w_object, offset=0, size=-1): - # w_subtype can only be exactly 'buffer' for now - if not space.is_w(w_subtype, space.gettypefor(Buffer)): - raise OperationError(space.w_TypeError, - space.wrap("argument 1 must be 'buffer'")) - - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - w_buffer = space.wrap(StringBuffer(builder.build())) - else: - w_buffer = space.buffer(w_object) - - buffer = space.interp_w(Buffer, w_buffer) # type-check - if offset == 0 and size == -1: - return w_buffer - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buffer, RWBuffer): - buffer = RWSubBuffer(buffer, offset, size) - else: - buffer = SubBuffer(buffer, offset, size) - return space.wrap(buffer) - - -Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). -""", - __new__ = interp2app(descr_buffer__new__), - __len__ = interp2app(Buffer.descr_len), - __getitem__ = interp2app(Buffer.descr_getitem), - __setitem__ = interp2app(Buffer.descr_setitem), - __buffer__ = interp2app(Buffer.descr__buffer__), - __str__ = interp2app(Buffer.descr_str), - __add__ = interp2app(Buffer.descr_add), - __eq__ = interp2app(Buffer.descr_eq), - __ne__ = interp2app(Buffer.descr_ne), - __lt__ = interp2app(Buffer.descr_lt), - __le__ = interp2app(Buffer.descr_le), - __gt__ = interp2app(Buffer.descr_gt), - __ge__ = interp2app(Buffer.descr_ge), - __hash__ = interp2app(Buffer.descr_hash), - __mul__ = interp2app(Buffer.descr_mul), - __rmul__ = interp2app(Buffer.descr_mul), - __repr__ = interp2app(Buffer.descr_repr), -) -Buffer.typedef.acceptable_as_base_class = False # ____________________________________________________________ diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,5 +1,5 @@ import py -from pypy.interpreter.buffer import Buffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) @@ -11,13 +11,12 @@ space = self.space w_hello = space.wrapbytes('hello world') buf = space.buffer_w(w_hello) - assert isinstance(buf, Buffer) assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.wrap(buf)) is buf + assert space.buffer_w(W_Buffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.buffer(w_hello)) == 'hello world' + assert space.bufferstr_w(W_Buffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, 
space.buffer_w, space.wrap(5)) e = space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) message = space.unwrap(e.value.get_w_value(space)) @@ -25,7 +24,7 @@ def test_file_write(self): space = self.space - w_buffer = space.buffer(space.wrapbytes('hello world')) + w_buffer = W_Buffer(space.buffer_w(space.wrapbytes('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -1,22 +1,194 @@ """ Implementation of the 'buffer' and 'memoryview' types. """ +import operator + +from pypy.interpreter import buffer from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import buffer +from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError -import operator +from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.rstring import StringBuilder -class W_MemoryView(W_Root): - """Implement the built-in 'memoryview' type as a thin wrapper around + +def _buffer_setitem(space, buf, w_index, newstring): + start, stop, step, size = space.decode_index4(w_index, buf.getlength()) + if step == 0: # index only + if len(newstring) != 1: + msg = 'buffer[index]=x: x must be a single character' + raise OperationError(space.w_TypeError, space.wrap(msg)) + char = newstring[0] # annotator hint + buf.setitem(start, char) + elif step == 1: + if len(newstring) != size: + msg = "right operand length must match slice length" + raise OperationError(space.w_ValueError, space.wrap(msg)) + buf.setslice(start, newstring) + else: + raise OperationError(space.w_ValueError, + space.wrap("buffer object does not support" + " slicing with a step")) + + +class W_Buffer(W_Root): + """Implement the built-in 'buffer' type as a wrapper around an interp-level buffer. 
""" def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) self.buf = buf + def buffer_w(self, space): + return self.buf + + @staticmethod + @unwrap_spec(offset=int, size=int) + def descr_new(space, w_subtype, w_object, offset=0, size=-1): + if space.isinstance_w(w_object, space.w_unicode): + # unicode objects support the old buffer interface + # but not the new buffer interface (change in python 2.7) + from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE + unistr = space.unicode_w(w_object) + builder = StringBuilder(len(unistr) * UNICODE_SIZE) + for unich in unistr: + pack_unichar(unich, builder) + from pypy.interpreter.buffer import StringBuffer + buf = StringBuffer(builder.build()) + else: + buf = space.buffer_w(w_object) + + if offset == 0 and size == -1: + return W_Buffer(buf) + # handle buffer slices + if offset < 0: + raise OperationError(space.w_ValueError, + space.wrap("offset must be zero or positive")) + if size < -1: + raise OperationError(space.w_ValueError, + space.wrap("size must be zero or positive")) + if isinstance(buf, buffer.RWBuffer): + buf = buffer.RWSubBuffer(buf, offset, size) + else: + buf = buffer.SubBuffer(buf, offset, size) + return W_Buffer(buf) + + def descr_len(self, space): + return space.wrap(self.buf.getlength()) + + def descr_getitem(self, space, w_index): + start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + if step == 0: # index only + return space.wrapbytes(self.buf.getitem(start)) + res = self.buf.getslice(start, stop, step, size) + return space.wrapbytes(res) + + @unwrap_spec(newstring='bufferstr') + def descr_setitem(self, space, w_index, newstring): + if not isinstance(self.buf, buffer.RWBuffer): + raise OperationError(space.w_TypeError, + space.wrap("buffer is read-only")) + _buffer_setitem(space, self.buf, w_index, newstring) + + def descr_str(self, space): + return space.wrap(self.buf.as_str()) + + @unwrap_spec(other='bufferstr') + def descr_add(self, space, other): + return space.wrapbytes(self.buf.as_str() + other) + + def _make_descr__cmp(name): + def descr__cmp(self, space, w_other): + if not isinstance(w_other, W_Buffer): + return space.w_NotImplemented + # xxx not the most efficient implementation + str1 = self.buf.as_str() + str2 = w_other.buf.as_str() + return space.wrap(getattr(operator, name)(str1, str2)) + descr__cmp.func_name = name + return descr__cmp + + descr_eq = _make_descr__cmp('eq') + descr_ne = _make_descr__cmp('ne') + descr_lt = _make_descr__cmp('lt') + descr_le = _make_descr__cmp('le') + descr_gt = _make_descr__cmp('gt') + descr_ge = _make_descr__cmp('ge') + + def descr_hash(self, space): + return space.wrap(compute_hash(self.buf.as_str())) + + def descr_mul(self, space, w_times): + # xxx not the most efficient implementation + w_string = space.wrapbytes(self.buf.as_str()) + # use the __mul__ method instead of space.mul() so that we + # return NotImplemented instead of raising a TypeError + return space.call_method(w_string, '__mul__', w_times) + + def descr_repr(self, space): + if isinstance(self.buf, buffer.RWBuffer): + info = 'read-write buffer' + else: + info = 'read-only buffer' + addrstring = self.getaddrstring(space) + + return space.wrap("<%s for 0x%s, size %d>" % + (info, addrstring, self.buf.getlength())) + +W_Buffer.typedef = TypeDef( + "buffer", + __doc__ = """\ +buffer(object [, offset[, size]]) + +Create a new buffer object which references the given object. 
+The buffer will reference a slice of the target object from the +start of the object (or at the specified offset). The slice will +extend to the end of the target object (or with the specified size). +""", + __new__ = interp2app(W_Buffer.descr_new), + __len__ = interp2app(W_Buffer.descr_len), + __getitem__ = interp2app(W_Buffer.descr_getitem), + __setitem__ = interp2app(W_Buffer.descr_setitem), + __str__ = interp2app(W_Buffer.descr_str), + __add__ = interp2app(W_Buffer.descr_add), + __eq__ = interp2app(W_Buffer.descr_eq), + __ne__ = interp2app(W_Buffer.descr_ne), + __lt__ = interp2app(W_Buffer.descr_lt), + __le__ = interp2app(W_Buffer.descr_le), + __gt__ = interp2app(W_Buffer.descr_gt), + __ge__ = interp2app(W_Buffer.descr_ge), + __hash__ = interp2app(W_Buffer.descr_hash), + __mul__ = interp2app(W_Buffer.descr_mul), + __rmul__ = interp2app(W_Buffer.descr_mul), + __repr__ = interp2app(W_Buffer.descr_repr), +) +W_Buffer.typedef.acceptable_as_base_class = False + + +class W_MemoryView(W_Root): + """Implement the built-in 'memoryview' type as a wrapper around + an interp-level buffer. + """ + + def __init__(self, buf): + self.buf = buf + + def buffer_w(self, space): + """Note that memoryview() is very inconsistent in CPython: it + does not support the buffer interface but does support the new + buffer interface: as a result, it is possible to pass memoryview + to e.g. socket.send() but not to file.write(). For simplicity + and consistency, in PyPy memoryview DOES support buffer(), which + means that it is accepted in more places than CPython. + """ + self._check_released(space) + return self.buf + + @staticmethod + def descr_new(space, w_subtype, w_object): + w_memoryview = W_MemoryView(space.buffer_w(w_object)) + return w_memoryview + def _make_descr__cmp(name): def descr__cmp(self, space, w_other): if self.buf is None: @@ -28,14 +200,14 @@ return space.wrap(getattr(operator, name)(str1, str2)) try: - w_buf = space.buffer(w_other) + buf = space.buffer_w(w_other) except OperationError, e: if not e.match(space, space.w_TypeError): raise return space.w_NotImplemented else: str1 = self.as_str() - str2 = space.buffer_w(w_buf).as_str() + str2 = buf.as_str() return space.wrap(getattr(operator, name)(str1, str2)) descr__cmp.func_name = name return descr__cmp @@ -62,18 +234,6 @@ buf = buffer.SubBuffer(buf, start, size) return W_MemoryView(buf) - def descr_buffer(self, space): - """ - Note that memoryview() is very inconsistent in CPython: it does not - support the buffer interface but does support the new buffer - interface: as a result, it is possible to pass memoryview to - e.g. socket.send() but not to file.write(). For simplicity and - consistency, in PyPy memoryview DOES support buffer(), which means - that it is accepted in more places than CPython. 
- """ - self._check_released(space) - return space.wrap(self.buf) - def descr_tobytes(self, space): self._check_released(space) return space.wrapbytes(self.as_str()) @@ -102,16 +262,14 @@ @unwrap_spec(newstring='bufferstr') def descr_setitem(self, space, w_index, newstring): self._check_released(space) - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf.descr_setitem(space, w_index, newstring) - else: + if not isinstance(self.buf, buffer.RWBuffer): raise OperationError(space.w_TypeError, space.wrap("cannot modify read-only memory")) + _buffer_setitem(space, self.buf, w_index, newstring) def descr_len(self, space): self._check_released(space) - return self.buf.descr_len(space) + return space.wrap(self.buf.getlength()) def w_get_format(self, space): self._check_released(space) @@ -165,17 +323,12 @@ return space.w_None -def descr_new(space, w_subtype, w_object): - memoryview = W_MemoryView(space.buffer(w_object)) - return space.wrap(memoryview) - W_MemoryView.typedef = TypeDef( "memoryview", __doc__ = """\ Create a new memoryview object which references the given object. """, - __new__ = interp2app(descr_new), - __buffer__ = interp2app(W_MemoryView.descr_buffer), + __new__ = interp2app(W_MemoryView.descr_new), __eq__ = interp2app(W_MemoryView.descr_eq), __getitem__ = interp2app(W_MemoryView.descr_getitem), __len__ = interp2app(W_MemoryView.descr_len), diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -4,6 +4,7 @@ from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.gateway import unwrap_spec +from pypy.module.__builtin__.interp_memoryview import W_Buffer class ByteBuffer(RWBuffer): @@ -23,4 +24,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return space.wrap(ByteBuffer(length)) + return W_Buffer(ByteBuffer(length)) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -4,6 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi @@ -39,35 +40,19 @@ copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) -class MiniBuffer(W_Root): - # a different subclass of W_Root for the MiniBuffer, because we - # want a slightly different (simplified) API at the level of Python. 
+# Override the typedef to narrow down the interface that's exposed to app-level +class MiniBuffer(W_Buffer): def __init__(self, buffer, keepalive=None): - self.buffer = buffer + W_Buffer.__init__(self, buffer) self.keepalive = keepalive - def descr_len(self, space): - return self.buffer.descr_len(space) - - def descr_getitem(self, space, w_index): - return self.buffer.descr_getitem(space, w_index) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - self.buffer.descr_setitem(space, w_index, newstring) - - def descr__buffer__(self, space): - return self.buffer.descr__buffer__(space) - - MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), - __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -5,6 +5,7 @@ TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.buffer import RWBuffer +from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -521,7 +522,7 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) - w_buf = space.wrap(RawBuffer(buffer, start, length)) + w_buf = W_Buffer(RawBuffer(buffer, start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -9,8 +9,12 @@ class AppTestBufferTooShort: spaceconfig = {'usemodules': ['_multiprocessing', 'thread', 'signal', - 'select', 'fcntl', 'struct', - 'binascii']} + 'select', 'struct', 'binascii']} + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') + def setup_class(cls): if cls.runappdirect: @@ -75,6 +79,8 @@ 'itertools', '_socket', 'binascii', ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') def setup_class(cls): if sys.platform != "win32": @@ -86,7 +92,6 @@ # just for multiprocessing to import correctly on Windows w_modules = space.sys.get('modules') space.setitem(w_modules, space.wrap('msvcrt'), space.sys) - space.setitem(w_modules, space.wrap('_subprocess'), space.sys) else: import _multiprocessing @@ -100,9 +105,12 @@ spaceconfig = { "usemodules": [ '_multiprocessing', 'thread', 'signal', 'struct', 'array', - '_socket', 'binascii', 'select', 'fcntl', - ] + '_socket', 'binascii', 'select' ] } + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_rawffi') + else: + spaceconfig['usemodules'].append('fcntl') def setup_class(cls): cls.w_connections = cls.space.newlist([]) diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -207,7 +207,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = 
interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), free = interp2app(W_ArrayInstance.free), @@ -232,7 +231,6 @@ __setitem__ = interp2app(W_ArrayInstance.descr_setitem), __getitem__ = interp2app(W_ArrayInstance.descr_getitem), __len__ = interp2app(W_ArrayInstance.getlength), - __buffer__ = interp2app(W_ArrayInstance.descr_buffer), buffer = GetSetProperty(W_ArrayInstance.getbuffer), shape = interp_attrproperty('shape', W_ArrayInstance), byptr = interp2app(W_ArrayInstance.byptr), diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -359,9 +359,9 @@ lltype.free(self.ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) - def descr_buffer(self, space): + def buffer_w(self, space): from pypy.module._rawffi.buffer import RawFFIBuffer - return space.wrap(RawFFIBuffer(self)) + return RawFFIBuffer(self) def getrawsize(self): raise NotImplementedError("abstract base class") diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -364,7 +364,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), free = interp2app(W_StructureInstance.free), shape = interp_attrproperty('shape', W_StructureInstance), @@ -387,7 +386,6 @@ __repr__ = interp2app(W_StructureInstance.descr_repr), __getattr__ = interp2app(W_StructureInstance.getattr), __setattr__ = interp2app(W_StructureInstance.setattr), - __buffer__ = interp2app(W_StructureInstance.descr_buffer), buffer = GetSetProperty(W_StructureInstance.getbuffer), shape = interp_attrproperty('shape', W_StructureInstance), byptr = interp2app(W_StructureInstance.byptr), diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -69,10 +69,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) a = addr.lock(_c.sockaddr_in6) rffi.setintfield(a, 'c_sin6_port', rsocket.htons(port)) rffi.setintfield(a, 'c_sin6_flowinfo', rsocket.htonl(flowinfo)) @@ -101,10 +98,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: return rsocket.UNIXAddress(space.str_w(w_address)) @@ -116,10 +110,16 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): if port < 0 or port > 0xffff: - raise 
OperationError(space.w_ValueError, space.wrap( + raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) return rffi.cast(rffi.USHORT, port) +def make_unsigned_flowinfo(space, flowinfo): + if flowinfo < 0 or flowinfo > 0xfffff: + raise OperationError(space.w_OverflowError, space.wrap( + "flowinfo must be 0-1048575.")) + return rffi.cast(lltype.Unsigned, flowinfo) + # XXX Hack to seperate rpython and pypy def ipaddr_from_object(space, w_sockaddr): host = space.str_w(space.getitem(w_sockaddr, space.wrap(0))) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -391,7 +391,7 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() - + def test_socket_connect_ex(self): import _socket s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) @@ -417,8 +417,13 @@ def test_bigport(self): import _socket s = _socket.socket() - raises(ValueError, s.connect, ("localhost", 1000000)) - raises(ValueError, s.connect, ("localhost", -1)) + exc = raises(OverflowError, s.connect, ("localhost", -1)) + assert "port must be 0-65535." in str(exc.value) + exc = raises(OverflowError, s.connect, ("localhost", 1000000)) + assert "port must be 0-65535." in str(exc.value) + s = _socket.socket(_socket.AF_INET6) + exc = raises(OverflowError, s.connect, ("::1", 1234, 1048576)) + assert "flowinfo must be 0-1048575." in str(exc.value) def test_NtoH(self): import sys @@ -466,6 +471,13 @@ import socket s = socket.socket() + def test_subclass(self): + from _socket import socket + class MySock(socket): + blah = 123 + s = MySock() + assert s.blah == 123 + def test_getsetsockopt(self): import _socket as socket import struct @@ -587,11 +599,11 @@ class AppTestSocketTCP: + HOST = 'localhost' + def setup_class(cls): cls.space = space - HOST = 'localhost' - def setup_method(self, method): w_HOST = space.wrap(self.HOST) self.w_serv = space.appexec([w_HOST], @@ -602,6 +614,7 @@ serv.listen(1) return serv ''') + def teardown_method(self, method): if hasattr(self, 'w_serv'): space.appexec([self.w_serv], '(serv): serv.close()') @@ -622,7 +635,7 @@ raises(error, raise_error) def test_recv_send_timeout(self): - from _socket import socket, timeout + from _socket import socket, timeout, SOL_SOCKET, SO_RCVBUF, SO_SNDBUF cli = socket() cli.connect(self.serv.getsockname()) fileno, addr = self.serv._accept() @@ -643,6 +656,9 @@ assert count is None buf = t.recv(1) assert buf == b'?' 
+ # speed up filling the buffers + t.setsockopt(SOL_SOCKET, SO_RCVBUF, 4096) + cli.setsockopt(SOL_SOCKET, SO_SNDBUF, 4096) # test send() timeout count = 0 try: @@ -671,7 +687,7 @@ conn = socket.socket(fileno=fileno) buf = memoryview(MSG) conn.send(buf) - buf = array.array('b', b' '*1024) + buf = array.array('b', b' ' * 1024) nbytes = cli.recv_into(buf) assert nbytes == len(MSG) msg = buf.tobytes()[:len(MSG)] @@ -687,7 +703,7 @@ conn = socket.socket(fileno=fileno) buf = memoryview(MSG) conn.send(buf) - buf = array.array('b', b' '*1024) + buf = array.array('b', b' ' * 1024) nbytes, addr = cli.recvfrom_into(buf) assert nbytes == len(MSG) msg = buf.tobytes()[:len(MSG)] @@ -698,6 +714,7 @@ cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) assert cli.family == socket.AF_INET + class AppTestErrno: def setup_class(cls): cls.space = space diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,7 +1,7 @@ from rpython.tool.udir import udir class AppTestSSL: - spaceconfig = dict(usemodules=('_ssl', '_socket', 'binascii')) + spaceconfig = dict(usemodules=('_ssl', '_socket', 'binascii', 'thread')) def setup_class(cls): import os diff --git a/pypy/module/_ssl/thread_lock.py b/pypy/module/_ssl/thread_lock.py --- a/pypy/module/_ssl/thread_lock.py +++ b/pypy/module/_ssl/thread_lock.py @@ -1,4 +1,5 @@ -from rpython.rlib.ropenssl import * +from rpython.rlib import rthread +from rpython.rlib.ropenssl import libraries from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -22,7 +23,6 @@ # without caring about the GIL. separate_module_source = """ - #include static unsigned int _ssl_locks_count = 0; @@ -62,13 +62,12 @@ } """ -from rpython.rlib import rthread - eci = rthread.eci.merge(ExternalCompilationInfo( separate_module_sources=[separate_module_source], post_include_bits=[ "int _PyPy_SSL_SetupThreads(void);"], export_symbols=['_PyPy_SSL_SetupThreads'], + libraries = libraries, )) _PyPy_SSL_SetupThreads = rffi.llexternal('_PyPy_SSL_SetupThreads', diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -135,6 +135,9 @@ self.len = 0 self.allocated = 0 + def buffer_w(self, space): + return ArrayBuffer(self) + def descr_append(self, space, w_x): """ append(x) @@ -505,9 +508,6 @@ def descr_iter(self, space): return space.wrap(ArrayIterator(self)) - def descr_buffer(self, space): - return space.wrap(ArrayBuffer(self)) - def descr_repr(self, space): if self.len == 0: return space.wrap("array('%s')" % self.typecode) @@ -544,7 +544,6 @@ __radd__ = interp2app(W_ArrayBase.descr_radd), __rmul__ = interp2app(W_ArrayBase.descr_rmul), - __buffer__ = interp2app(W_ArrayBase.descr_buffer), __iter__ = interp2app(W_ArrayBase.descr_iter), __repr__ = interp2app(W_ArrayBase.descr_repr), diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -20,7 +20,7 @@ #define PyArrayObject PyObject #define PyArray_Descr PyObject -extern PyTypeObject PyArray_Type; +PyAPI_DATA(PyTypeObject) PyArray_Type; typedef unsigned char npy_bool; typedef unsigned char npy_uint8; diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ 
b/pypy/module/cpyext/memoryobject.py @@ -20,4 +20,4 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) w_obj = from_ref(space, view.c_obj) buf = CBuffer(space, view.c_buf, view.c_len, w_obj) - return space.wrap(W_MemoryView(space.wrap(buf))) + return space.wrap(W_MemoryView(buf)) diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -217,7 +217,7 @@ if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return 0 + return lltype.nullptr(PyGILState_STATE.TO) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -14,8 +14,9 @@ from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer as W_Buffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize @@ -228,7 +229,7 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) -class CPyBuffer(W_Buffer): +class CPyBuffer(Buffer): # Similar to Py_buffer def __init__(self, ptr, size, w_obj): @@ -249,7 +250,7 @@ ret = generic_cpy_call(space, func_target, w_self, view, flags) if rffi.cast(lltype.Signed, ret) == -1: space.fromcache(State).check_and_raise_exception(always=True) - return space.wrap(CPyBuffer(view.c_buf, view.c_len, w_self)) + return W_Buffer(CPyBuffer(view.c_buf, view.c_len, w_self)) def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,10 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # prevent linking with PythonXX.lib + w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] + kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % + (space.int_w(w_maj), space.int_w(w_min))] elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: @@ -181,6 +185,19 @@ state = cls.space.fromcache(RefcountState) state.non_heaptypes_w[:] = [] + def setup_method(self, meth): + freeze_refcnts(self) + + def teardown_method(self, meth): + self.cleanup_references(self.space) + # XXX: like AppTestCpythonExtensionBase.teardown_method: + # find out how to disable check_and_print_leaks() if the + # test failed + assert not self.check_and_print_leaks(), ( + "Test leaks or loses object(s). You should also check if " + "the test actually passed in the first place; if it failed " + "it is likely to reach this place.") + def test_load_error(self): import cpyext raises(ImportError, cpyext.load_module, "missing.file", "foo") @@ -368,13 +385,12 @@ for name in self.imported_module_names: self.unimport_module(name) self.cleanup_references(self.space) - if self.check_and_print_leaks(): - assert False, ( - "Test leaks or loses object(s). 
You should also check if " - "the test actually passed in the first place; if it failed " - "it is likely to reach this place.") - # XXX find out how to disable check_and_print_leaks() if the - # XXX test failed... + # XXX: find out how to disable check_and_print_leaks() if the + # test failed... + assert not self.check_and_print_leaks(), ( + "Test leaks or loses object(s). You should also check if " + "the test actually passed in the first place; if it failed " + "it is likely to reach this place.") class AppTestCpythonExtension(AppTestCpythonExtensionBase): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -1,38 +1,36 @@ import os -import sys +from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rstring import rsplit +from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.annlowlevel import llhelper + from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.objspace.std.typeobject import W_TypeObject, find_best_base +from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import GetSetProperty +from pypy.module.__builtin__.abstractinst import abstract_issubclass_w +from pypy.module.cpyext import structmemberdefs from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - Py_TPFLAGS_HAVE_GETCHARBUFFER, - build_type_checkers, PyObjectFields, Py_buffer) + Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers, Py_buffer) +from pypy.module.cpyext.methodobject import ( + PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) +from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, - track_reference, RefcountState, borrow_from) -from pypy.interpreter.module import Module -from pypy.module.cpyext import structmemberdefs -from pypy.module.cpyext.modsupport import convert_method_defs + track_reference, RefcountState, borrow_from, Py_DecRef) +from pypy.module.cpyext.slotdefs import ( + slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State -from pypy.module.cpyext.methodobject import ( - PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) -from pypy.module.cpyext.pyobject import Py_IncRef, Py_DecRef, _Py_Dealloc from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) -from pypy.module.cpyext.slotdefs import ( - slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) -from pypy.interpreter.error import OperationError -from rpython.rlib.rstring import rsplit -from rpython.rlib.objectmodel import specialize -from pypy.module.__builtin__.abstractinst import abstract_issubclass_w -from rpython.rlib import jit +from pypy.objspace.std.typeobject import W_TypeObject, find_best_base + WARN_ABOUT_MISSING_SLOT_FUNCTIONS = False @@ -421,7 +419,6 @@ def type_alloc(space, w_metatype): - size = rffi.sizeof(PyHeapTypeObject) metatype = rffi.cast(PyTypeObjectPtr, make_ref(space, 
w_metatype)) # Don't increase refcount for non-heaptypes if metatype: diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -243,12 +243,10 @@ fromlist_w = None rel_modulename = None - if (level != 0 and - w_globals is not None and - space.isinstance_w(w_globals, space.w_dict)): - - rel_modulename, rel_level = _get_relative_name(space, modulename, level, w_globals) - + if (level != 0 and w_globals is not None and + space.isinstance_w(w_globals, space.w_dict)): + rel_modulename, rel_level = _get_relative_name(space, modulename, level, + w_globals) if rel_modulename: # if no level was set, ignore import errors, and # fall back to absolute import at the end of the @@ -582,6 +580,7 @@ def load_module(space, w_modulename, find_info, reuse=False): if find_info is None: return + if find_info.w_loader: return space.call_method(find_info.w_loader, "load_module", w_modulename) @@ -607,7 +606,7 @@ try: if find_info.modtype == PY_SOURCE: load_source_module( - space, w_modulename, w_mod, + space, w_modulename, w_mod, find_info.filename, find_info.stream.readall(), find_info.stream.try_to_find_file_descriptor()) return w_mod diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -232,7 +232,6 @@ import _warnings def simplefilter(action, category): _warnings.filters.insert(0, (action, None, category, None, 0)) - simplefilter('error', ImportWarning) try: raises(ImportWarning, imp) @@ -467,7 +466,7 @@ def test_future_relative_import_level_1(self): from pkg import relative_c assert relative_c.inpackage == 1 - + def test_future_relative_import_level_2(self): from pkg.pkg1 import relative_d assert relative_d.inpackage == 1 @@ -751,10 +750,7 @@ import imp import pkg import os - - info = ('.py', 'r', imp.PY_SOURCE) pathname = os.path.join(os.path.dirname(pkg.__file__), 'a.py') - module = imp.load_module('a', open(pathname), 'invalid_path_name', ('.py', 'r', imp.PY_SOURCE)) assert module.__name__ == 'a' @@ -1284,7 +1280,7 @@ def load_module(self, name): sys.modules[name] = sys return sys - + def importer_for_path(path): if path == "xxx": return Importer() diff --git a/pypy/module/marshal/__init__.py b/pypy/module/marshal/__init__.py --- a/pypy/module/marshal/__init__.py +++ b/pypy/module/marshal/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } - + interpleveldefs = { 'dump' : 'interp_marshal.dump', 'dumps' : 'interp_marshal.dumps', diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -335,8 +335,8 @@ def descr_copy(self, space): return self.convert_to(space, self.get_dtype(space)) - def descr_buffer(self, space): - return self.descr_ravel(space).descr_get_data(space) + def buffer_w(self, space): + return self.descr_ravel(space).buffer_w(space) def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) @@ -547,7 +547,6 @@ __bool__ = interp2app(W_GenericBox.descr_nonzero), __oct__ = interp2app(W_GenericBox.descr_oct), __hex__ = interp2app(W_GenericBox.descr_hex), - __buffer__ = interp2app(W_GenericBox.descr_buffer), __add__ = interp2app(W_GenericBox.descr_add), __sub__ = interp2app(W_GenericBox.descr_sub), diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -7,6 +7,7 
@@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name +from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops, loop, \ support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -602,8 +603,11 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ctypes not implemented yet")) + def buffer_w(self, space): + return self.implementation.get_buffer(space) + def descr_get_data(self, space): - return self.implementation.get_buffer(space) + return W_Buffer(self.buffer_w(space)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -1248,7 +1252,6 @@ __float__ = interp2app(W_NDimArray.descr_float), __hex__ = interp2app(W_NDimArray.descr_hex), __oct__ = interp2app(W_NDimArray.descr_oct), - __buffer__ = interp2app(W_NDimArray.descr_get_data), __index__ = interp2app(W_NDimArray.descr_index), __pos__ = interp2app(W_NDimArray.descr_pos), diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -480,23 +480,19 @@ def test_flat_setitem(self): result = self.run("flat_setitem") assert result == 1.0 - py.test.skip("don't run for now") self.check_trace_count(1) - # XXX not ideal, but hey, let's ignore it for now - self.check_simple_loop({'raw_load': 1, - 'raw_store': 1, - 'int_lt': 1, - 'int_gt': 1, - 'int_add': 4, - 'guard_true': 2, - 'arraylen_gc': 2, - 'jump': 1, - 'int_sub': 1, - # XXX bad part - 'int_and': 1, - 'int_mod': 1, - 'int_rshift': 1, - }) + self.check_simple_loop({ + 'call': 2, + 'getfield_gc': 2, + 'guard_no_exception': 2, + 'guard_not_invalidated': 1, + 'guard_true': 1, + 'int_gt': 1, + 'int_sub': 1, + 'jump': 1, + 'raw_load': 1, + 'raw_store': 1, + }) def define_dot(): return """ @@ -509,6 +505,7 @@ def test_dot(self): result = self.run("dot") assert result == 184 + self.check_trace_count(3) self.check_simple_loop({'float_add': 1, 'float_mul': 1, 'guard_not_invalidated': 1, @@ -526,7 +523,7 @@ 'guard_class': 4, 'guard_false': 2, 'guard_no_exception': 3, - 'guard_nonnull': 8, + 'guard_nonnull': 12, 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, 'guard_true': 9, diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -17,6 +17,10 @@ self.space = space self.mmap = mmap_obj + def buffer_w(self, space): + self.check_valid() + return MMapBuffer(self.space, self.mmap) + def close(self): self.mmap.close() @@ -202,10 +206,6 @@ self.mmap.setitem(start, value[i]) start += step - def descr_buffer(self): - self.check_valid() - return self.space.wrap(MMapBuffer(self.space, self.mmap)) - def descr_enter(self, space): self.check_valid() return space.wrap(self) @@ -270,7 +270,6 @@ __len__ = interp2app(W_MMap.__len__), __getitem__ = interp2app(W_MMap.descr_getitem), __setitem__ = interp2app(W_MMap.descr_setitem), - __buffer__ = interp2app(W_MMap.descr_buffer), __enter__ = interp2app(W_MMap.descr_enter), __exit__ = interp2app(W_MMap.descr_exit), diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -177,17 +177,9 @@ import sys import errno for fn in 
[self.posix.stat, self.posix.lstat]: - try: - fn("nonexistentdir/nonexistentfile") - except OSError as e: - assert e.errno == errno.ENOENT - assert e.filename == "nonexistentdir/nonexistentfile" - # On Windows, when the parent directory does not exist, - # the winerror is 3 (cannot find the path specified) - # instead of 2 (cannot find the file specified) - if sys.platform == 'win32': - assert isinstance(e, WindowsError) - assert e.winerror == 3 + exc = raises(OSError, fn, "nonexistentdir/nonexistentfile") + assert exc.value.errno == errno.ENOENT + assert exc.value.filename == "nonexistentdir/nonexistentfile" if hasattr(__import__(os.name), "statvfs"): def test_statvfs(self): diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -1,8 +1,8 @@ import os -import py +import pytest if os.name != 'posix': - py.test.skip('pwd module only available on unix') + pytest.skip('pwd module only available on unix') class AppTestPwd: spaceconfig = dict(usemodules=['pwd']) diff --git a/pypy/module/pwd/test/test_ztranslation.py b/pypy/module/pwd/test/test_ztranslation.py --- a/pypy/module/pwd/test/test_ztranslation.py +++ b/pypy/module/pwd/test/test_ztranslation.py @@ -1,5 +1,9 @@ +import os +import pytest from pypy.objspace.fake.checkmodule import checkmodule +if os.name != 'posix': + pytest.skip('pwd module only available on unix') def test_checkmodule(): checkmodule('pwd') diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -428,7 +428,7 @@ self.handlers = [None] * NB_HANDLERS - self.buffer_w = None + self.buffer = None self.buffer_size = 8192 self.buffer_used = 0 self.w_character_data_handler = None @@ -537,7 +537,7 @@ def buffer_string(self, space, w_string, length): ll_length = rffi.cast(lltype.Signed, length) - if self.buffer_w is not None: + if self.buffer is not None: if self.buffer_used + ll_length > self.buffer_size: self.flush_character_buffer(space) # handler might have changed; drop the rest on the floor @@ -545,11 +545,11 @@ if self.w_character_data_handler is None: return True if ll_length <= self.buffer_size: - self.buffer_w.append(w_string) + self.buffer.append(w_string) self.buffer_used += ll_length return True else: - self.buffer_w = [] + self.buffer = [] self.buffer_used = 0 return False @@ -680,12 +680,12 @@ return space.wrap(parser) def flush_character_buffer(self, space): - if not self.buffer_w: + if not self.buffer: return w_data = space.call_function( space.getattr(space.wrap(''), space.wrap('join')), - space.newlist(self.buffer_w)) - self.buffer_w = [] + space.newlist(self.buffer)) + self.buffer = [] self.buffer_used = 0 if self.w_character_data_handler: @@ -730,14 +730,14 @@ self.buffer_size = value def get_buffer_text(self, space): - return space.wrap(self.buffer_w is not None) + return space.wrap(self.buffer is not None) def set_buffer_text(self, space, w_value): if space.is_true(w_value): - self.buffer_w = [] + self.buffer = [] self.buffer_used = 0 else: self.flush_character_buffer(space) - self.buffer_w = None + self.buffer = None def get_intern(self, space): if self.w_intern: diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -207,14 +207,13 @@ ... 
""") ops = loop.ops_by_id('cfficall') - assert 'raw_malloc' not in str(ops) - assert 'raw_free' not in str(ops) - assert 'getarrayitem_raw' not in log.opnames(ops) - assert 'setarrayitem_raw' not in log.opnames(ops) + for name in ['raw_malloc', 'raw_free']: + assert name not in str(ops) + for name in ['raw_load', 'raw_store', 'getarrayitem_raw', 'setarrayitem_raw']: + assert name not in log.opnames(ops) # so far just check that call_release_gil() is produced. # later, also check that the arguments to call_release_gil() # are constants - # are constants, and that the numerous raw_mallocs are removed def test_cffi_call_guard_not_forced_fails(self): # this is the test_pypy_c equivalent of @@ -341,13 +340,11 @@ guard_value(p166, ConstPtr(ptr72), descr=...) p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) guard_no_exception(descr=...) - i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) - setfield_gc(p167, 0, descr=) - setfield_gc(p167, ConstPtr(ptr86), descr=) - guard_no_exception(descr=...) + i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) i169 = int_add(i168, i97) i170 = int_sub(i160, i106) setfield_gc(p167, i168, descr=) + setfield_gc(p167, ConstPtr(null), descr=) setfield_gc(p167, ConstPtr(ptr89), descr=) i171 = uint_gt(i170, i108) guard_false(i171, descr=...) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -302,6 +302,11 @@ ec._py_repr = None return ec + def buffer_w(self, w_obj): + from pypy.interpreter.buffer import Buffer + is_root(w_obj) + return Buffer() + def unicode_from_object(self, w_obj): return w_some_obj() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -28,6 +28,9 @@ """representation for debugging purposes""" return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) + def buffer_w(w_self, space): + return BytearrayBuffer(w_self.data) + def _new(self, value): return W_BytearrayObject(_make_data(value)) @@ -240,9 +243,6 @@ raise return space.newbool(res) - def descr_buffer(self, space): - return BytearrayBuffer(self.data) - def descr_inplace_add(self, space, w_other): if isinstance(w_other, W_BytearrayObject): self.data += w_other.data @@ -941,7 +941,6 @@ __init__ = interp2app(W_BytearrayObject.descr_init, doc=BytearrayDocstrings.__init__.__doc__), - __buffer__ = interp2app(W_BytearrayObject.descr_buffer), __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add, doc=BytearrayDocstrings.__iadd__.__doc__), diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -379,9 +379,6 @@ of the specified width. The string S is never truncated. 
""" - def descr_buffer(self, space): - pass - class W_BytesObject(W_AbstractBytesObject): import_from_mixin(StringMethods) @@ -401,6 +398,9 @@ def bytes_w(w_self, space): return w_self._value + def buffer_w(w_self, space): + return StringBuffer(w_self._value) + def listview_bytes(self): return _create_list_from_bytes(self._value) @@ -521,9 +521,6 @@ x = compute_hash(self._value) return space.wrap(x) - def descr_buffer(self, space): - return space.wrap(StringBuffer(self._value)) - def descr_eq(self, space, w_other): if space.config.objspace.std.withstrbuf: from pypy.objspace.std.strbufobject import W_StringBufferObject @@ -837,7 +834,6 @@ upper = interpindirect2app(W_AbstractBytesObject.descr_upper), zfill = interpindirect2app(W_AbstractBytesObject.descr_zfill), - __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), __getnewargs__ = interpindirect2app( W_AbstractBytesObject.descr_getnewargs), diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -889,7 +889,7 @@ else: # If object supports the buffer interface try: - w_buffer = space.buffer(w_value) + buf = space.buffer_w(w_value) except OperationError as e: if not e.match(space, space.w_TypeError): raise @@ -897,7 +897,6 @@ "int() argument must be a string or a number, " "not '%T'", w_value) else: - buf = space.interp_w(Buffer, w_buffer) return _string_to_int_or_long(space, w_inttype, w_value, buf.as_str()) else: diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -61,6 +61,7 @@ from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject + from pypy.module.__builtin__.interp_memoryview import W_Buffer import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods @@ -79,6 +80,7 @@ self.pythontypes.append(intobject.W_IntObject.typedef) self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) + self.pythontypes.append(W_Buffer.typedef) # the set of implementation types self.typeorder = { diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -2,11 +2,12 @@ import py -from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.bytesobject import (W_AbstractBytesObject, + W_BytesObject, StringBuffer) from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rstring import StringBuilder + class W_StringBufferObject(W_AbstractBytesObject): w_str = None @@ -35,6 +36,9 @@ def bytes_w(self, space): return self.force() + def buffer_w(self, space): + return StringBuffer(self.force()) + def descr_len(self, space): return space.wrap(self.length) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2872,7 +2872,6 @@ py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) def test_sig_bug(self): - py.test.skip("_annenforceargs_ does not work for default arguments") def g(x, y=5): return y == 5 g._annenforceargs_ = (int, int) @@ -2880,7 +2879,8 @@ return g(x) a = 
self.RPythonAnnotator() s = a.build_types(fun, [int]) - assert not s.is_constant() + assert s.knowntype is bool + assert s.is_constant() def test_sig_list(self): def g(buf): diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -547,20 +547,18 @@ track_allocation = d.pop('track_allocation', True) if d: raise UnsupportedMallocFlags(d) - TYPE = op.args[0].value if zero: name += '_zero' if add_memory_pressure: name += '_add_memory_pressure' if not track_allocation: name += '_no_track_allocation' + TYPE = op.args[0].value op1 = self.prepare_builtin_call(op, name, args, (TYPE,), TYPE) - if name == 'raw_malloc_varsize': - ITEMTYPE = op.args[0].value.OF - if ITEMTYPE == lltype.Char: - return self._handle_oopspec_call(op1, args, - EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, - EffectInfo.EF_CAN_RAISE) + if name.startswith('raw_malloc_varsize') and TYPE.OF == lltype.Char: + return self._handle_oopspec_call(op1, args, + EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR, + EffectInfo.EF_CAN_RAISE) return self.rewrite_op_direct_call(op1) def rewrite_op_malloc_varsize(self, op): @@ -591,7 +589,7 @@ name += '_no_track_allocation' op1 = self.prepare_builtin_call(op, name, [op.args[0]], (STRUCT,), STRUCT) - if name == 'raw_free': + if name.startswith('raw_free'): return self._handle_oopspec_call(op1, [op.args[0]], EffectInfo.OS_RAW_FREE, EffectInfo.EF_CANNOT_RAISE) @@ -837,8 +835,8 @@ RESULT = lltype.Ptr(STRUCT) assert RESULT == op.result.concretetype return self._do_builtin_call(op, 'alloc_with_del', [], - extra = (RESULT, vtable), - extrakey = STRUCT) + extra=(RESULT, vtable), + extrakey=STRUCT) heaptracker.register_known_gctype(self.cpu, vtable, STRUCT) opname = 'new_with_vtable' else: @@ -1237,7 +1235,7 @@ op1 = self.prepare_builtin_call(op, "llong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) if %r == "TO_INT": assert op2.result.concretetype == lltype.Signed return op2 @@ -1269,7 +1267,7 @@ op1 = self.prepare_builtin_call(op, "ullong_%s", args) op2 = self._handle_oopspec_call(op1, args, EffectInfo.OS_LLONG_%s, - EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) return op2 ''' % (_op, _oopspec.lower(), _oopspec)).compile() diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -60,7 +60,7 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, **kwds): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None): return 'calldescr' def calldescr_canraise(self, calldescr): return True diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -93,6 +93,11 @@ # possible aliasing). 
self.clear() self._lazy_setfield = None + if optheap.postponed_op: + for a in op.getarglist(): + if a is optheap.postponed_op.result: + optheap.emit_postponed_op() + break optheap.next_optimization.propagate_forward(op) if not can_cache: return @@ -179,6 +184,9 @@ def flush(self): self.force_all_lazy_setfields_and_arrayitems() + self.emit_postponed_op() + + def emit_postponed_op(self): if self.postponed_op: postponed_op = self.postponed_op self.postponed_op = None @@ -227,10 +235,7 @@ def emit_operation(self, op): self.emitting_operation(op) - if self.postponed_op: - postponed_op = self.postponed_op - self.postponed_op = None - self.next_optimization.propagate_forward(postponed_op) + self.emit_postponed_op() if (op.is_comparison() or op.getopnum() == rop.CALL_MAY_FORCE or op.is_ovf()): self.postponed_op = op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1660,6 +1660,16 @@ """ self.optimize_loop(ops, ops) + def test_setfield_int_eq_result(self): + # test that the setfield_gc does not end up before int_eq + ops = """ + [p1, i1, i2] + i3 = int_eq(i1, i2) + setfield_gc(p1, i3, descr=valuedescr) + jump(p1, i1, i2) + """ + self.optimize_loop(ops, ops) + def test_duplicate_setfield_aliasing(self): # a case where aliasing issues (and not enough cleverness) mean # that we fail to remove any setfield_gc diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1,14 +1,15 @@ import py from rpython.rlib.objectmodel import instantiate +from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt, TreeLoop +from rpython.jit.metainterp.optimize import InvalidLoop +from rpython.jit.metainterp.optimizeopt import build_opt_chain from rpython.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, convert_old_style_to_targets) -from rpython.jit.metainterp.optimizeopt import build_opt_chain -from rpython.jit.metainterp.optimize import InvalidLoop -from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt -from rpython.jit.metainterp.history import TreeLoop -from rpython.jit.metainterp import compile, resume +from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import \ + FakeMetaInterpStaticData from rpython.jit.metainterp.resoperation import rop, opname, oparity -from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData + def test_build_opt_chain(): def check(chain, expected_names): @@ -40,7 +41,6 @@ class BaseTestWithUnroll(BaseTest): - enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" def optimize_loop(self, ops, expected, expected_preamble=None, @@ -93,8 +93,8 @@ def raises(self, e, fn, *args): return py.test.raises(e, fn, *args).value + class OptimizeOptTest(BaseTestWithUnroll): - def setup_method(self, meth=None): class FailDescr(compile.ResumeGuardDescr): oparse = None @@ -130,7 +130,6 @@ self.namespace.pop('fdescr', None) self.namespace.pop('fdescr2', None) - def test_simple(self): ops = """ [] @@ -606,9 +605,9 @@ i1 = ptr_eq(p0, NULL) guard_false(i1) [] i2 = ptr_ne(NULL, p0) - 
guard_true(i0) [] + guard_true(i2) [] i3 = ptr_eq(NULL, p0) - guard_false(i1) [] + guard_false(i3) [] guard_nonnull(p0) [] jump(p0) """ @@ -623,6 +622,30 @@ """ self.optimize_loop(ops, expected, preamble) + def test_nonnull_2(self): From noreply at buildbot.pypy.org Tue Mar 18 23:24:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 18 Mar 2014 23:24:19 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140318222419.299ED1C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70069:fb381945ef16 Date: 2014-03-18 15:23 -0700 http://bitbucket.org/pypy/pypy/changeset/fb381945ef16/ Log: merge default diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -171,7 +171,7 @@ # very inconsisten on CPython. In PyPy, memoryview supports # the buffer interface, and thus the following comparison # succeeds. See also the comment in - # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer # # Comparison with objects which don't support the buffer API self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -194,11 +194,6 @@ return None def buffer_w(self, space): - w_impl = space.lookup(self, '__buffer__') - if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_buffer): - return w_result.buf self._typed_unwrap_error(space, "buffer") def bytes_w(self, space): diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -7,8 +7,7 @@ class Buffer(object): """Abstract base class for buffers.""" - - __slots__ = () # no extra slot here + __slots__ = [] def getlength(self): raise NotImplementedError @@ -29,14 +28,13 @@ def get_raw_address(self): raise ValueError("no raw buffer") - def is_writable(self): return False + class RWBuffer(Buffer): """Abstract base class for read-write buffers.""" - - __slots__ = () # no extra slot here + __slots__ = [] def is_writable(self): return True @@ -51,10 +49,8 @@ self.setitem(start + i, string[i]) - -# ____________________________________________________________ - class StringBuffer(Buffer): + __slots__ = ['value'] def __init__(self, value): self.value = value @@ -76,48 +72,11 @@ return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) - -class StringLikeBuffer(Buffer): - """For app-level objects that already have a string-like interface - with __len__ and a __getitem__ that returns characters or (with - slicing) substrings.""" - # XXX this is inefficient, it should only be used temporarily - - def __init__(self, space, w_obj): - self.space = space - self.w_obj = w_obj - - def getlength(self): - space = self.space - return space.len_w(self.w_obj) - - def getitem(self, index): - space = self.space - w_value = space.getitem(self.w_obj, space.wrap(index)) - try: - return chr(space.int_w(w_value)) - except OperationError as e: - if not e.match(space, space.w_TypeError): - raise - s = space.bytes_w(w_value) - if len(s) != 1: - raise OperationError(space.w_ValueError, - space.wrap("single byte expected, got string")) - char = s[0] # annotator hint - return char - - def 
getslice(self, start, stop, step, size): - space = self.space - if step != 1: - raise OperationError(space.w_ValueError, space.wrap( - "buffer object does not support slicing with a step")) - s = space.str_w(space.getslice(self.w_obj, space.wrap(start), - space.wrap(stop))) - return s - # ____________________________________________________________ class SubBufferMixin(object): + _attrs_ = ['buffer', 'offset', 'size'] + def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -141,9 +100,11 @@ # out of bounds return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) + class SubBuffer(Buffer): import_from_mixin(SubBufferMixin) + class RWSubBuffer(RWBuffer): import_from_mixin(SubBufferMixin) diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,12 +1,10 @@ import py -from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) class TestBuffer: - def test_buffer_w(self): space = self.space w_hello = space.wrapbytes('hello world') @@ -14,9 +12,9 @@ assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(W_Buffer(buf)) is buf + assert space.buffer_w(space.newbuffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(W_Buffer(space.buffer_w(w_hello))) == 'hello world' + assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) e = space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) message = space.unwrap(e.value.get_w_value(space)) @@ -24,7 +22,7 @@ def test_file_write(self): space = self.space - w_buffer = W_Buffer(space.buffer_w(space.wrapbytes('hello world'))) + w_buffer = space.newbuffer(space.buffer_w(space.wrapbytes('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') @@ -36,4 +34,4 @@ f.close() assert data == 'hello world' -# Note: some app-level tests for buffer are in module/__builtin__/test/. +# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. 
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -29,14 +29,10 @@ interpleveldefs = { # constants + '__debug__' : '(space.w_True)', # XXX 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', - '__debug__' : '(space.w_True)', # XXX - 'type' : '(space.w_type)', - 'object' : '(space.w_object)', - 'memoryview' : 'interp_memoryview.W_MemoryView', - 'open' : 'state.get(space).w_open', # interp-level function definitions diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py deleted file mode 100644 --- a/pypy/module/__builtin__/test/test_buffer.py +++ /dev/null @@ -1,122 +0,0 @@ -"""Tests some behaviour of the buffer type that is not tested in -lib-python/2.5.2/test/test_types.py where the stdlib buffer tests live.""" - -class AppTestMemoryView: - spaceconfig = dict(usemodules=['array']) - - def test_basic(self): - v = memoryview(b"abc") - assert v.tobytes() == b"abc" - assert len(v) == 3 - assert v[0] == b'a' - assert list(v) == [b'a', b'b', b'c'] - assert v.tolist() == [97, 98, 99] - assert v[1] == b"b" - assert v[-1] == b"c" - raises(TypeError, "v[1] = 'x'") - assert v.readonly is True - w = v[1:234] - assert isinstance(w, memoryview) - assert len(w) == 2 - - def test_array_buffer(self): - import array - b = memoryview(array.array("B", [1, 2, 3])) - assert len(b) == 3 - assert b[0:3] == b"\x01\x02\x03" - - def test_nonzero(self): - assert memoryview(b'\x00') - assert not memoryview(b'') - import array - assert memoryview(array.array("B", [0])) - assert not memoryview(array.array("B", [])) - - def test_bytes(self): - assert bytes(memoryview(b'hello')) == b'hello' - - def test_repr(self): - assert repr(memoryview(b'hello')).startswith(' memoryview(b'ab')") - raises(TypeError, "memoryview(b'ab') >= memoryview(b'ab')") - raises(TypeError, "memoryview(b'ab') < memoryview(b'abc')") - raises(TypeError, "memoryview(b'ab') <= memoryview(b'ab')") - raises(TypeError, "memoryview(b'ab') > memoryview(b'aa')") - raises(TypeError, "memoryview(b'ab') >= memoryview(b'ab')") - - def test_hash(self): - raises(TypeError, "hash(memoryview(b'hello'))") - - def test_getitem_only_ints(self): - class MyInt(object): - def __init__(self, x): - self.x = x - - def __int__(self): - return self.x - - buf = memoryview(b'hello world') - raises(TypeError, "buf[MyInt(0)]") - raises(TypeError, "buf[MyInt(0):MyInt(5)]") - - def test_rw(self): - data = bytearray(b'abcefg') - v = memoryview(data) - assert v.readonly is False - v[0] = b'z' - assert data == bytearray(eval("b'zbcefg'")) - v[1:4] = b'123' - assert data == bytearray(eval("b'z123fg'")) - raises((ValueError, TypeError), "v[2] = 'spam'") - - def test_memoryview_attrs(self): - v = memoryview(b"a"*100) - assert v.format == "B" - assert v.itemsize == 1 - assert v.shape == (100,) - assert v.ndim == 1 - assert v.strides == (1,) - - def test_suboffsets(self): - v = memoryview(b"a"*100) - assert v.suboffsets == None - - def test_release(self): - v = memoryview(b"a"*100) - v.release() - raises(ValueError, len, v) - raises(ValueError, v.tolist) - raises(ValueError, v.tobytes) - raises(ValueError, "v[0]") - raises(ValueError, "v[0] = b'a'") - raises(ValueError, "v.format") - raises(ValueError, "v.itemsize") - raises(ValueError, "v.ndim") - raises(ValueError, "v.readonly") - raises(ValueError, "v.shape") - raises(ValueError, "v.strides") - raises(ValueError, "v.suboffsets") - 
raises(ValueError, "with v as cm: pass") - raises(ValueError, "memoryview(v)") - assert v == v - assert v != memoryview(b"a"*100) - assert v != b"a"*100 - assert "released memory" in repr(v) - - def test_context_manager(self): - v = memoryview(b"a"*100) - with v as cm: - assert cm is v - assert "released memory" in repr(v) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -25,6 +25,17 @@ else: cls.w_safe_runtimerror = cls.space.wrap(sys.version_info < (2, 6)) + def test_builtin_names(self): + import __builtin__ + assert __builtin__.None is None + assert __builtin__.False is False + assert __builtin__.True is True + + assert __builtin__.buffer is buffer + assert __builtin__.bytes is str + assert __builtin__.dict is dict + assert __builtin__.memoryview is memoryview + def test_bytes_alias(self): assert bytes is not str assert isinstance(eval("b'hi'"), bytes) diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -4,11 +4,9 @@ from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.gateway import unwrap_spec -from pypy.module.__builtin__.interp_memoryview import W_Buffer class ByteBuffer(RWBuffer): - def __init__(self, len): self.data = ['\x00'] * len @@ -24,4 +22,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return W_Buffer(ByteBuffer(length)) + return space.newbuffer(ByteBuffer(length)) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -4,7 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.module.__builtin__.interp_memoryview import W_Buffer +from pypy.objspace.std.memoryview import W_Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -5,7 +5,6 @@ TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.buffer import RWBuffer -from pypy.module.__builtin__.interp_memoryview import W_Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -522,7 +521,7 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) - w_buf = W_Buffer(RawBuffer(buffer, start, length)) + w_buf = space.newbuffer(RawBuffer(buffer, start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,9 +22,9 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod +from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property -from 
pypy.module.__builtin__.interp_memoryview import W_MemoryView from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -14,7 +14,6 @@ from pypy.module.cpyext.pyobject import from_ref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State -from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments @@ -250,7 +249,7 @@ ret = generic_cpy_call(space, func_target, w_self, view, flags) if rffi.cast(lltype.Signed, ret) == -1: space.fromcache(State).check_and_raise_exception(always=True) - return W_Buffer(CPyBuffer(view.c_buf, view.c_len, w_self)) + return space.newbuffer(CPyBuffer(view.c_buf, view.c_len, w_self)) def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -7,7 +7,6 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name -from pypy.module.__builtin__.interp_memoryview import W_Buffer from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops, loop, \ support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -607,7 +606,7 @@ return self.implementation.get_buffer(space) def descr_get_data(self, space): - return W_Buffer(self.buffer_w(space)) + return space.newbuffer(self.buffer_w(space)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -175,6 +175,9 @@ def newseqiter(self, x): return w_some_obj() + def newbuffer(self, x): + return w_some_obj() + def marshal_w(self, w_obj): "NOT_RPYTHON" raise NotImplementedError diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/objspace/std/memoryview.py rename from pypy/module/__builtin__/interp_memoryview.py rename to pypy/objspace/std/memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -37,6 +37,7 @@ """ def __init__(self, buf): + assert isinstance(buf, buffer.Buffer) self.buf = buf def buffer_w(self, space): @@ -44,7 +45,7 @@ @staticmethod @unwrap_spec(offset=int, size=int) - def descr_new(space, w_subtype, w_object, offset=0, size=-1): + def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): if space.isinstance_w(w_object, space.w_unicode): # unicode objects support the old buffer interface # but not the new buffer interface (change in python 2.7) @@ -145,7 +146,7 @@ start of the object (or at the specified offset). The slice will extend to the end of the target object (or with the specified size). 
""", - __new__ = interp2app(W_Buffer.descr_new), + __new__ = interp2app(W_Buffer.descr_new_buffer), __len__ = interp2app(W_Buffer.descr_len), __getitem__ = interp2app(W_Buffer.descr_getitem), __setitem__ = interp2app(W_Buffer.descr_setitem), @@ -171,6 +172,7 @@ """ def __init__(self, buf): + assert isinstance(buf, buffer.Buffer) self.buf = buf def buffer_w(self, space): @@ -185,9 +187,8 @@ return self.buf @staticmethod - def descr_new(space, w_subtype, w_object): - w_memoryview = W_MemoryView(space.buffer_w(w_object)) - return w_memoryview + def descr_new_memoryview(space, w_subtype, w_object): + return W_MemoryView(space.buffer_w(w_object)) def _make_descr__cmp(name): def descr__cmp(self, space, w_other): @@ -328,7 +329,7 @@ __doc__ = """\ Create a new memoryview object which references the given object. """, - __new__ = interp2app(W_MemoryView.descr_new), + __new__ = interp2app(W_MemoryView.descr_new_memoryview), __eq__ = interp2app(W_MemoryView.descr_eq), __getitem__ = interp2app(W_MemoryView.descr_getitem), __len__ = interp2app(W_MemoryView.descr_len), diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -61,7 +61,7 @@ from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject - from pypy.module.__builtin__.interp_memoryview import W_Buffer + from pypy.objspace.std.memoryview import W_Buffer, W_MemoryView import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods @@ -81,6 +81,7 @@ self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) self.pythontypes.append(W_Buffer.typedef) + self.pythontypes.append(W_MemoryView.typedef) # the set of implementation types self.typeorder = { diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -25,6 +25,7 @@ from pypy.objspace.std.iterobject import W_AbstractSeqIterObject from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.longobject import W_LongObject, newlong +from pypy.objspace.std.memoryview import W_Buffer from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.iterobject import W_SeqIterObject @@ -339,6 +340,9 @@ def newseqiter(self, w_obj): return W_SeqIterObject(w_obj) + def newbuffer(self, w_obj): + return W_Buffer(w_obj) + def type(self, w_obj): jit.promote(w_obj.__class__) return w_obj.getclass(self) diff --git a/pypy/objspace/std/test/test_memoryview.py b/pypy/objspace/std/test/test_memoryview.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_memoryview.py @@ -0,0 +1,119 @@ +class AppTestMemoryView: + spaceconfig = dict(usemodules=['array']) + + def test_basic(self): + v = memoryview(b"abc") + assert v.tobytes() == b"abc" + assert len(v) == 3 + assert v[0] == b'a' + assert list(v) == [b'a', b'b', b'c'] + assert v.tolist() == [97, 98, 99] + assert v[1] == b"b" + assert v[-1] == b"c" + raises(TypeError, "v[1] = 'x'") + assert v.readonly is True + w = v[1:234] + assert isinstance(w, memoryview) + assert len(w) == 2 + + def test_array_buffer(self): + import array + b = memoryview(array.array("B", [1, 2, 3])) + assert len(b) == 3 + assert b[0:3] == b"\x01\x02\x03" + + def test_nonzero(self): + assert memoryview(b'\x00') + assert 
not memoryview(b'') + import array + assert memoryview(array.array("B", [0])) + assert not memoryview(array.array("B", [])) + + def test_bytes(self): + assert bytes(memoryview(b'hello')) == b'hello' + + def test_repr(self): + assert repr(memoryview(b'hello')).startswith(' memoryview(b'ab')") + raises(TypeError, "memoryview(b'ab') >= memoryview(b'ab')") + raises(TypeError, "memoryview(b'ab') < memoryview(b'abc')") + raises(TypeError, "memoryview(b'ab') <= memoryview(b'ab')") + raises(TypeError, "memoryview(b'ab') > memoryview(b'aa')") + raises(TypeError, "memoryview(b'ab') >= memoryview(b'ab')") + + def test_hash(self): + raises(TypeError, "hash(memoryview(b'hello'))") + + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + buf = memoryview(b'hello world') + raises(TypeError, "buf[MyInt(0)]") + raises(TypeError, "buf[MyInt(0):MyInt(5)]") + + def test_rw(self): + data = bytearray(b'abcefg') + v = memoryview(data) + assert v.readonly is False + v[0] = b'z' + assert data == bytearray(eval("b'zbcefg'")) + v[1:4] = b'123' + assert data == bytearray(eval("b'z123fg'")) + raises((ValueError, TypeError), "v[2] = 'spam'") + + def test_memoryview_attrs(self): + v = memoryview(b"a"*100) + assert v.format == "B" + assert v.itemsize == 1 + assert v.shape == (100,) + assert v.ndim == 1 + assert v.strides == (1,) + + def test_suboffsets(self): + v = memoryview(b"a"*100) + assert v.suboffsets == None + + def test_release(self): + v = memoryview(b"a"*100) + v.release() + raises(ValueError, len, v) + raises(ValueError, v.tolist) + raises(ValueError, v.tobytes) + raises(ValueError, "v[0]") + raises(ValueError, "v[0] = b'a'") + raises(ValueError, "v.format") + raises(ValueError, "v.itemsize") + raises(ValueError, "v.ndim") + raises(ValueError, "v.readonly") + raises(ValueError, "v.shape") + raises(ValueError, "v.strides") + raises(ValueError, "v.suboffsets") + raises(ValueError, "with v as cm: pass") + raises(ValueError, "memoryview(v)") + assert v == v + assert v != memoryview(b"a"*100) + assert v != b"a"*100 + assert "released memory" in repr(v) + + def test_context_manager(self): + v = memoryview(b"a"*100) + with v as cm: + assert cm is v + assert "released memory" in repr(v) diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -169,6 +169,7 @@ cfile = udir.join('dosmaperr.c') cfile.write(r''' #include + #include #include #ifdef __GNUC__ #define _dosmaperr mingw_dosmaperr From noreply at buildbot.pypy.org Wed Mar 19 01:49:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 01:49:58 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140319004958.B1D8F1C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70070:d88001e37970 Date: 2014-03-18 19:45 -0400 http://bitbucket.org/pypy/pypy/changeset/d88001e37970/ Log: cleanups diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -71,8 +71,8 @@ assert 0 <= start <= stop return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) +# ____________________________________________________________ -# ____________________________________________________________ class SubBufferMixin(object): _attrs_ = ['buffer', 'offset', 'size'] @@ -98,7 +98,8 @@ if start == stop: return '' # otherwise, adding self.offset might make them # out of 
bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) + return self.buffer.getslice(self.offset + start, self.offset + stop, + step, size) class SubBuffer(Buffer): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1,25 +1,8 @@ import sys -import py -import py.test - - -## class AppTestSimpleArray: -## spaceconfig = dict(usemodules=('array',)) -## def setup_class(cls): -## cls.w_simple_array = cls.space.appexec([], """(): -## import array -## return array.simple_array -## """) - -## def test_simple(self): -## a = self.simple_array(10) -## a[5] = 7.42 -## assert a[5] == 7.42 +import pytest class BaseArrayTests: - - def test_ctor(self): assert len(self.array('c')) == 0 assert len(self.array('i')) == 0 @@ -563,7 +546,6 @@ assert not a > 2*a assert not a >= 2*a - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -794,7 +776,6 @@ assert img[3, 25] == 3 * 9 - def test_override_from(self): class mya(self.array): def fromlist(self, lst): @@ -879,41 +860,41 @@ def test_assign_object_with_special_methods(self): from array import array - + class Num(object): def __float__(self): return 5.25 - + def __int__(self): return 7 - + class NotNum(object): pass - + class Silly(object): def __float__(self): return None - + def __int__(self): - return None + return None class OldNum: def __float__(self): return 6.25 - + def __int__(self): return 8 - + class OldNotNum: pass - + class OldSilly: def __float__(self): return None - + def __int__(self): return None - + for tc in 'bBhHiIlL': a = array(tc, [0]) raises(TypeError, a.__setitem__, 0, 1.0) @@ -931,7 +912,7 @@ a = array(tc, [0]) a[0] = 1.0 a[0] = 1 - a[0] = Num() + a[0] = Num() assert a[0] == 5.25 raises(TypeError, a.__setitem__, NotNum()) a[0] = OldNum() @@ -939,24 +920,23 @@ raises(TypeError, a.__setitem__, OldNotNum()) raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) - + a = array('c', 'hi') a[0] = 'b' assert a[0] == 'b' - + a = array('u', u'hi') a[0] = u'b' assert a[0] == u'b' - + class TestCPythonsOwnArray(BaseArrayTests): - def setup_class(cls): import array cls.array = array.array import struct cls.struct = struct - cls.tempfile = str(py.test.ensuretemp('array').join('tmpfile')) + cls.tempfile = str(pytest.ensuretemp('array').join('tmpfile')) cls.maxint = sys.maxint @@ -969,7 +949,7 @@ return array.array """) cls.w_tempfile = cls.space.wrap( - str(py.test.ensuretemp('array').join('tmpfile'))) + str(pytest.ensuretemp('array').join('tmpfile'))) cls.w_maxint = cls.space.wrap(sys.maxint) def test_buffer_info(self): @@ -1036,11 +1016,11 @@ def test_getitem_only_ints(self): class MyInt(object): - def __init__(self, x): - self.x = x + def __init__(self, x): + self.x = x - def __int__(self): - return self.x + def __int__(self): + return self.x a = self.array('i', [1, 2, 3, 4, 5, 6]) raises(TypeError, "a[MyInt(0)]") @@ -1050,4 +1030,3 @@ class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() spaceconfig['objspace.std.builtinshortcut'] = True - diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,7 +1,5 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -import py -import sys class 
AppTestArrayModule(AppTestCpythonExtensionBase): enable_leak_checking = False @@ -21,7 +19,7 @@ module = self.import_module(name='array') arr = module.array('i', [1,2,3]) sum = 0 - for i in arr: + for i in arr: sum += i assert sum == 6 @@ -60,4 +58,3 @@ '\x02\0\0\0' '\x03\0\0\0' '\x04\0\0\0') - From noreply at buildbot.pypy.org Wed Mar 19 01:50:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 01:50:00 +0100 (CET) Subject: [pypy-commit] pypy default: test/fix marshal behavior wrt buffers Message-ID: <20140319005000.1E6B31C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70071:07f40b57d13c Date: 2014-03-18 20:35 -0400 http://bitbucket.org/pypy/pypy/changeset/07f40b57d13c/ Log: test/fix marshal behavior wrt buffers diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -217,11 +217,6 @@ self.space.marshal_w(w_obj, self) def dump_w_obj(self, w_obj): - space = self.space - if (space.type(w_obj).is_heaptype() and - space.lookup(w_obj, "__buffer__") is None): - w_err = space.wrap("only builtins can be marshaled") - raise OperationError(space.w_ValueError, w_err) try: self.put_w_obj(w_obj) except rstackovf.StackOverflow: diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -2,6 +2,8 @@ class AppTestMarshal: + spaceconfig = {'usemodules': ['array']} + def setup_class(cls): tmpfile = udir.join('AppTestMarshal.tmp') cls.w_tmpfile = cls.space.wrap(str(tmpfile)) @@ -173,7 +175,15 @@ for cls in types: class subtype(cls): pass - raises(ValueError, marshal.dumps, subtype) + exc = raises(ValueError, marshal.dumps, subtype) + assert str(exc.value) == 'unmarshallable object' + + def test_valid_subtypes(self): + import marshal + from array import array + class subtype(array): + pass + assert marshal.dumps(subtype('c', 'test')) == marshal.dumps(array('c', 'test')) def test_bad_typecode(self): import marshal @@ -182,7 +192,8 @@ class AppTestSmallLong(AppTestMarshal): - spaceconfig = {"objspace.std.withsmalllong": True} + spaceconfig = AppTestMarshal.spaceconfig.copy() + spaceconfig["objspace.std.withsmalllong"] = True def test_smalllong(self): import __pypy__ From noreply at buildbot.pypy.org Wed Mar 19 01:50:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 01:50:01 +0100 (CET) Subject: [pypy-commit] pypy default: fix buffer_w on objects defining pypy's app-level buffer interface Message-ID: <20140319005001.86ED61C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70072:66fcaf1343c6 Date: 2014-03-18 20:41 -0400 http://bitbucket.org/pypy/pypy/changeset/66fcaf1343c6/ Log: fix buffer_w on objects defining pypy's app-level buffer interface diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -195,6 +195,11 @@ return None def buffer_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.buffer_w(space) self._typed_unwrap_error(space, "buffer") def str_w(self, space): diff --git a/pypy/objspace/std/test/test_memoryview.py b/pypy/objspace/std/test/test_memoryview.py --- 
a/pypy/objspace/std/test/test_memoryview.py +++ b/pypy/objspace/std/test/test_memoryview.py @@ -2,10 +2,14 @@ spaceconfig = dict(usemodules=['array']) def test_init(self): + import sys class A(object): def __buffer__(self): return buffer('123') - raises(TypeError, buffer, A()) + if '__pypy__' not in sys.builtin_module_names: + raises(TypeError, buffer, A()) + else: + assert buffer(A()) == buffer('123') def test_unicode_buffer(self): import sys From noreply at buildbot.pypy.org Wed Mar 19 02:50:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 02:50:20 +0100 (CET) Subject: [pypy-commit] pypy default: unused imports/variables Message-ID: <20140319015020.5665C1C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70073:4de7fd2dd15f Date: 2014-03-18 21:34 -0400 http://bitbucket.org/pypy/pypy/changeset/4de7fd2dd15f/ Log: unused imports/variables diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -11,7 +11,6 @@ """ from pypy.interpreter import function -from pypy.objspace.descroperation import object_getattribute from rpython.rlib import jit from pypy.objspace.std.mapdict import LOOKUP_METHOD_mapdict, \ LOOKUP_METHOD_mapdict_fill_cache_method @@ -36,7 +35,6 @@ if space.config.objspace.std.withmapdict and not jit.we_are_jitted(): # mapdict has an extra-fast version of this function - from pypy.objspace.std.mapdict import LOOKUP_METHOD_mapdict if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return @@ -79,7 +77,7 @@ n_kwargs = (oparg >> 8) & 0xff w_self = f.peekvalue(n_args + (2 * n_kwargs)) n = n_args + (w_self is not None) - + if not n_kwargs: w_callable = f.peekvalue(n_args + (2 * n_kwargs) + 1) try: @@ -98,7 +96,7 @@ key = f.space.str_w(w_key) keywords[n_kwargs] = key keywords_w[n_kwargs] = w_value - + arguments = f.popvalues(n) # includes w_self if it is not None args = f.argument_factory(arguments, keywords, keywords_w, None, None) if w_self is None: diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -140,7 +140,7 @@ for key, cell in iterator()] def clear(self, w_dict): - iterator = self.unerase(w_dict.dstorage).clear() + self.unerase(w_dict.dstorage).clear() self.mutated() def popitem(self, w_dict): diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -1,4 +1,5 @@ -from pypy.interpreter import gateway +import math + from pypy.interpreter.error import OperationError from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_IntObject @@ -12,8 +13,6 @@ from rpython.rlib import jit, rcomplex from rpython.rlib.rarithmetic import intmask, r_ulonglong -import math - class W_AbstractComplexObject(W_Object): __slots__ = () diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef from pypy.objspace.std.stdtypedef import StdObjSpaceMultiMethod from rpython.rlib.rfloat import string_to_float 
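Taken together, changesets 66fcaf1343c6 and 07f40b57d13c above (with the follow-up e9a9d862dd67 further down in this digest) change how app-level objects reach the interp-level buffer protocol: the default buffer_w() in baseobjspace now falls back to an app-level __buffer__ hook, and marshal wraps heap-type objects that expose a buffer instead of rejecting them outright. A minimal Python 2 sketch of the resulting app-level behaviour, condensed from the tests in those diffs and assuming a PyPy built from this branch; the class names A, subarray and subdict are only for illustration:

    import sys
    import marshal
    from array import array

    class A(object):
        def __buffer__(self):
            # hand out any object implementing the buffer interface
            return buffer('123')

    if '__pypy__' in sys.builtin_module_names:
        # buffer() now delegates to A.__buffer__() via the buffer_w fallback;
        # plain CPython 2.7 raises TypeError here instead.
        assert buffer(A()) == buffer('123')

        class subarray(array):
            pass

        # heap types that expose a buffer are marshalled through that buffer ...
        assert marshal.dumps(subarray('c', 'test')) == marshal.dumps(array('c', 'test'))

        # ... while subclasses of other builtins still refuse to marshal
        class subdict(dict):
            pass
        try:
            marshal.dumps(subdict())
        except ValueError as e:
            assert str(e) == 'unmarshallable object'
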
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1184,7 +1184,7 @@ w_clone.setup_iterator() # spool until we have the same pos while w_clone.pos < self.pos: - w_obj = w_clone.next_entry() + w_clone.next_entry() w_clone.pos += 1 stuff = [w_clone.next_entry() for i in range(w_clone.pos, w_clone.len)] w_res = space.newlist(stuff) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -1,7 +1,6 @@ -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.dictmultiobject import W_DictMultiObject, create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy +#from pypy.objspace.std.model import registerimplementation, W_Object +#from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.dictmultiobject import DictStrategy, create_iterator_classes from pypy.objspace.std.typeobject import unwrap_cell from pypy.interpreter.error import OperationError, oefmt @@ -9,7 +8,6 @@ class DictProxyStrategy(DictStrategy): - erase, unerase = rerased.new_erasing_pair("dictproxy") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -62,7 +60,6 @@ w_type.dict_w[key] = w_value def setdefault(self, w_dict, w_key, w_default): - space = self.space w_result = self.getitem(w_dict, w_key) if w_result is not None: return w_result diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -1,12 +1,12 @@ +import math import operator from pypy.interpreter.error import OperationError, oefmt -from pypy.objspace.std import model, newformat +from pypy.objspace.std import newformat from pypy.objspace.std.floattype import float_typedef, W_AbstractFloatObject from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.longobject import W_LongObject from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT from rpython.rlib.rfloat import ( @@ -16,8 +16,6 @@ from rpython.rlib import rfloat from rpython.tool.sourcetools import func_with_new_name - -import math from pypy.objspace.std.intobject import W_IntObject diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -4,12 +4,11 @@ translation this module uses rarithmetic.ovfcheck to explicitly check for overflows, something CPython does not do anymore. 
""" - import operator import sys from rpython.rlib import jit -from rpython.rlib.objectmodel import instantiate, import_from_mixin, specialize +from rpython.rlib.objectmodel import instantiate from rpython.rlib.rarithmetic import ( LONG_BIT, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint @@ -20,7 +19,6 @@ from pypy.interpreter import typedef from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.objspace.std import newformat @@ -28,12 +26,10 @@ BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) from pypy.objspace.std.stdtypedef import StdTypeDef - SENTINEL = object() class W_AbstractIntObject(W_Root): - __slots__ = () def is_w(self, space, w_other): @@ -706,7 +702,6 @@ else: value, w_longval = _string_to_int_or_long(space, w_value, buf.as_str()) - ok = True else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -9,7 +9,6 @@ from pypy.interpreter import typedef from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -10,7 +10,7 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std.register_all import register_all -from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_uint, intmask +from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_uint from pypy.objspace.std import model from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.interpreter.special import Ellipsis @@ -201,7 +201,6 @@ register(TYPE_BINARY_COMPLEX, unmarshal_Complex_bin) def marshal_w__Long(space, w_long, m): - from rpython.rlib.rbigint import rbigint from rpython.rlib.rarithmetic import r_ulonglong m.start(TYPE_LONG) SHIFT = 15 @@ -375,7 +374,6 @@ lng = u.atom_lng(tc) res = [None] * lng idx = 0 - space = u.space while idx < lng: res[idx] = unmarshal_str(u) idx += 1 diff --git a/pypy/objspace/std/multimethod.py b/pypy/objspace/std/multimethod.py --- a/pypy/objspace/std/multimethod.py +++ b/pypy/objspace/std/multimethod.py @@ -866,7 +866,6 @@ entryarray = CompressedArray(null_entry) indexarray = self.mrdtable.indexarray lst = self.mrdtable.list_of_types - indexline = [] def compress(typesprefix, typesnum): if len(typesprefix) == self.multimethod.arity: diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -461,7 +461,6 @@ if not got_align: self._align = "=" i += 1 - start_i = i self._width, i = _parse_int(self.space, spec, i, length) if length != i and spec[i] == ",": self._thousands_sep = True @@ -576,7 +575,6 @@ return space.wrap(self._pad(string)) def _get_locale(self, tp): - space = self.space if tp == "n": dec, thousands, grouping = rlocale.numeric_formatting() elif self._thousands_sep: @@ -673,12 +671,10 @@ grouping = self._loc_grouping min_width = spec.n_min_width grouping_state = 0 - count = 0 left = spec.n_digits n_ts = len(self._loc_thousands) 
need_separator = False done = False - groupings = len(grouping) previous = 0 while True: group = ord(grouping[grouping_state]) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -306,7 +306,6 @@ strdict=strdict, kwargs=kwargs) def newset(self): - from pypy.objspace.std.setobject import newset return W_SetObject(self, None) def newslice(self, w_start, w_end, w_step): diff --git a/pypy/objspace/std/proxy_helpers.py b/pypy/objspace/std/proxy_helpers.py --- a/pypy/objspace/std/proxy_helpers.py +++ b/pypy/objspace/std/proxy_helpers.py @@ -1,10 +1,8 @@ - """ Some transparent helpers, put here because of cyclic imports """ -from pypy.objspace.std.model import W_ANY, W_Object -from pypy.interpreter import baseobjspace +from pypy.objspace.std.model import W_ANY from pypy.interpreter.argument import Arguments from rpython.tool.sourcetools import func_with_new_name @@ -24,7 +22,7 @@ def function(space, w_transparent_list, __args__): args = __args__.prepend(space.wrap(op_name)) return space.call_args(w_transparent_list.w_controller, args) - + function = func_with_new_name(function, mm.name) mm.register(function, type_) @@ -32,14 +30,14 @@ def function(space, w_transparent_list, *args_w): args = Arguments(space, [space.wrap(op_name)] + list(args_w[:-1]) + args_w[-1]) return space.call_args(w_transparent_list.w_controller, args) - + function = func_with_new_name(function, mm.name) mm.register(function, type_, *([W_ANY] * (mm.arity - 1))) def install_mm_trampoline(type_, mm, is_local): classname = type_.__name__[2:] mm_name, op_name = create_mm_names(classname, mm, is_local) - + if ['__args__'] == mm.argnames_after: return install_general_args_trampoline(type_, mm, is_local, op_name) if ['args_w'] == mm.argnames_after: @@ -58,10 +56,10 @@ """ if mm.arity != 2: return False - + if len(mm.specialnames) != 2: return False - + # search over the signatures for signature in mm.signatures(): if signature == (type_.original, type_.original): @@ -69,21 +67,21 @@ return False def install_mm_special(type_, mm, is_local): - classname = type_.__name__[2:] + #classname = type_.__name__[2:] #mm_name, op_name = create_mm_names(classname, mm, is_local) - + def function(space, w_any, w_transparent_list): retval = space.call_function(w_transparent_list.w_controller, space.wrap(mm.specialnames[1]), w_any) return retval - + function = func_with_new_name(function, mm.specialnames[0]) - + mm.register(function, type_.typedef.any, type_) def register_type(type_): from pypy.objspace.std.stdtypedef import multimethods_defined_on - + for mm, is_local in multimethods_defined_on(type_.original): if not mm.name.startswith('__'): install_mm_trampoline(type_, mm, is_local) diff --git a/pypy/objspace/std/proxyobject.py b/pypy/objspace/std/proxyobject.py --- a/pypy/objspace/std/proxyobject.py +++ b/pypy/objspace/std/proxyobject.py @@ -1,15 +1,8 @@ - """ transparent list implementation """ - -from pypy.objspace.std.model import W_Object from pypy.interpreter.error import OperationError from pypy.interpreter import baseobjspace -#class W_Transparent(W_Object): -# def __init__(self, w_controller): -# self.controller = w_controller - def transparent_class(name, BaseCls): class W_Transparent(BaseCls): @@ -72,25 +65,3 @@ return W_Transparent W_Transparent = transparent_class('W_Transparent', baseobjspace.W_Root) -#W_TransparentObject = transparent_class('W_TransparentObject', W_Object) - -#from pypy.objspace.std.objecttype import object_typedef 
-#W_TransparentObject.typedef = object_typedef - -from pypy.interpreter.typedef import Function, GeneratorIterator, PyTraceback, \ - PyFrame, PyCode - -class W_TransparentFunction(W_Transparent): - typedef = Function.typedef - -class W_TransparentTraceback(W_Transparent): - typedef = PyTraceback.typedef - -class W_TransparentCode(W_Transparent): - typedef = PyCode.typedef - -class W_TransparentFrame(W_Transparent): - typedef = PyFrame.typedef - -class W_TransparentGenerator(W_Transparent): - typedef = GeneratorIterator.typedef diff --git a/pypy/objspace/std/slicetype.py b/pypy/objspace/std/slicetype.py --- a/pypy/objspace/std/slicetype.py +++ b/pypy/objspace/std/slicetype.py @@ -1,4 +1,4 @@ -from pypy.interpreter import baseobjspace, gateway +from pypy.interpreter import gateway from pypy.interpreter.typedef import GetSetProperty from pypy.objspace.std.stdtypedef import StdTypeDef, SMM from pypy.objspace.std.register_all import register_all diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -1,8 +1,7 @@ -from pypy.interpreter import gateway, baseobjspace, argument +from pypy.interpreter import gateway, baseobjspace from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.typedef import TypeDef, GetSetProperty, Member -from pypy.interpreter.typedef import descr_get_dict, descr_set_dict -from pypy.interpreter.typedef import descr_del_dict +from pypy.interpreter.typedef import TypeDef, GetSetProperty, \ + descr_get_dict, descr_set_dict, descr_del_dict from pypy.interpreter.baseobjspace import SpaceCache from pypy.objspace.std import model from pypy.objspace.std.model import StdObjSpaceMultiMethod diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -437,7 +437,6 @@ def descr_split(self, space, w_sep=None, maxsplit=-1): res = [] value = self._val(space) - length = len(value) if space.is_none(w_sep): res = split(value, maxsplit=maxsplit) return self._newlist_unwrapped(space, res) diff --git a/pypy/objspace/std/transparent.py b/pypy/objspace/std/transparent.py --- a/pypy/objspace/std/transparent.py +++ b/pypy/objspace/std/transparent.py @@ -1,14 +1,30 @@ - """ transparent.py - Several transparent proxy helpers """ - from pypy.interpreter import gateway from pypy.interpreter.error import OperationError, oefmt -from pypy.objspace.std.proxyobject import * +from pypy.interpreter.typedef import Function, GeneratorIterator, PyTraceback, \ + PyFrame, PyCode +from pypy.objspace.std.proxyobject import W_Transparent from pypy.objspace.std.typeobject import W_TypeObject -from rpython.rlib.objectmodel import r_dict from rpython.rlib.unroll import unrolling_iterable + +class W_TransparentFunction(W_Transparent): + typedef = Function.typedef + +class W_TransparentTraceback(W_Transparent): + typedef = PyTraceback.typedef + +class W_TransparentCode(W_Transparent): + typedef = PyCode.typedef + +class W_TransparentFrame(W_Transparent): + typedef = PyFrame.typedef + +class W_TransparentGenerator(W_Transparent): + typedef = GeneratorIterator.typedef + + class TypeCache(object): def __init__(self): self.cache = [] @@ -28,13 +44,10 @@ space.wrap(app_proxy_controller)) - def proxy(space, w_type, w_controller): """tproxy(typ, controller) -> obj Return something that looks like it is of type typ. 
Its behaviour is completely controlled by the controller.""" - from pypy.interpreter.typedef import Function, PyTraceback, PyFrame, \ - PyCode, GeneratorIterator if not space.is_true(space.callable(w_controller)): raise OperationError(space.w_TypeError, space.wrap("controller should be function")) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -2,12 +2,12 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.function import Function, StaticMethod -from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ +from pypy.interpreter.typedef import weakref_descr, GetSetProperty, Member, \ descr_get_dict from pypy.interpreter.astcompiler.misc import mangle from pypy.objspace.std.model import W_Object from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member +from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, From noreply at buildbot.pypy.org Wed Mar 19 03:15:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 03:15:50 +0100 (CET) Subject: [pypy-commit] pypy default: more unused Message-ID: <20140319021550.D0DB11C0166@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70074:851618b4e09d Date: 2014-03-18 22:10 -0400 http://bitbucket.org/pypy/pypy/changeset/851618b4e09d/ Log: more unused diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,7 +1,6 @@ """ Buffer protocol support. 
""" -from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import import_from_mixin diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.rstring import UnicodeBuilder, StringBuilder from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib import jit def create_builder(name, strtype, builder_cls): diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,4 +1,3 @@ -from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -80,7 +80,6 @@ return (cfield.ctype, cfield.offset) def _copy_from_same(self, cdata, w_ob): - space = self.space if isinstance(w_ob, cdataobj.W_CData): if w_ob.ctype is self and self.size >= 0: misc._raw_memcopy(w_ob._cdata, cdata, self.size) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,4 +1,3 @@ -import weakref from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here, specialize -from rpython.rlib.rarithmetic import r_uint, r_ulonglong, is_signed_integer_type +from rpython.rlib.rarithmetic import r_uint, r_ulonglong from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -1,7 +1,6 @@ - # Package initialisation from pypy.interpreter.mixedmodule import MixedModule -import sys + class Module(MixedModule): appleveldefs = { diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -1,4 +1,3 @@ - """ The ffi for rpython, need to be imported for side effects """ @@ -8,8 +7,6 @@ from rpython.rtyper.extfunc import register_external from pypy.module._minimal_curses import interp_curses from rpython.translator.tool.cbuild import ExternalCompilationInfo -from sys import platform -import os.path # We cannot trust ncurses5-config, it's broken in various ways in # various versions. 
For example it might not list -ltinfo even though diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -3,7 +3,6 @@ from pypy.interpreter.pycode import PyCode from pypy.interpreter.function import Function, Method from pypy.interpreter.module import Module -from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter.generator import GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -1,13 +1,10 @@ import sys -import math from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize from rpython.rlib import rfloat, runicode from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter import unicodehelper -from rpython.rtyper.annlowlevel import llstr, hlunicode OVF_DIGITS = len(str(sys.maxint)) @@ -30,7 +27,7 @@ Internally it's implemented at the level of low-level helpers, to avoid the extra copy we would need if we take the actual slice first. - + No bound checking is done, use carefully. """ from rpython.rtyper.annlowlevel import llstr, hlunicode @@ -226,7 +223,6 @@ def decode_array(self, i): w_list = self.space.newlist([]) start = i - count = 0 i = self.skip_whitespace(start) if self.ll_chars[i] == ']': self.pos = i+1 diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr +from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, unwrap_value, unpack_argshapes, got_libffi_error) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -15,7 +15,7 @@ from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc -from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint, \ +from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -1,4 +1,3 @@ - """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. 
Used for debugging ctypes """ diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import lltype -from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask +from rpython.rlib.rarithmetic import intmask from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo import os diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -2,7 +2,6 @@ Version numbers exposed by PyPy through the 'sys' module. """ import os -import re from rpython.translator.platform import platform from pypy.interpreter import gateway diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -4,12 +4,12 @@ """ import os +from rpython.rlib import rposix +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform as platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rlib.rarithmetic import r_uint, intmask -from rpython.rlib import rposix -from rpython.rlib.rstring import StringBuilder includes = ['stdio.h', 'sys/types.h'] if os.name == "posix": From noreply at buildbot.pypy.org Wed Mar 19 04:03:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 2014 04:03:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix/cleanup imports Message-ID: <20140319030331.73FF11D2812@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70075:9b55b0c3aacc Date: 2014-03-18 15:30 -0700 http://bitbucket.org/pypy/pypy/changeset/9b55b0c3aacc/ Log: fix/cleanup imports diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,9 +1,10 @@ -from pypy.interpreter.error import OperationError -from pypy.module.cpyext.api import cpython_api, Py_buffer +from rpython.rtyper.lltypesystem import lltype + +from pypy.interpreter.error import oefmt +from pypy.module.cpyext.api import Py_buffer, cpython_api from pypy.module.cpyext.pyobject import PyObject, from_ref from pypy.module.cpyext.buffer import CBuffer -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.objspace.std.memoryview import W_MemoryView @cpython_api([PyObject], PyObject) def PyMemoryView_FromObject(space, w_obj): @@ -16,8 +17,9 @@ means you shouldn't try to call PyBuffer_Release() yourself: it will be done on deallocation of the memoryview object.""" if not view.c_buf: - msg = "cannot make memory view from a buffer with a NULL data pointer" - raise OperationError(space.w_ValueError, space.wrap(msg)) + raise oefmt(space.w_ValueError, + "cannot make memory view from a buffer with a NULL data " + "pointer") w_obj = from_ref(space, view.c_obj) buf = CBuffer(space, view.c_buf, view.c_len, w_obj) return space.wrap(W_MemoryView(buf)) From noreply at buildbot.pypy.org Wed Mar 19 04:03:32 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 2014 04:03:32 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix translation Message-ID: 
<20140319030332.DECD81D2812@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70076:4656f3c15efb Date: 2014-03-18 20:02 -0700 http://bitbucket.org/pypy/pypy/changeset/4656f3c15efb/ Log: fix translation diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_longlong from pypy.module._io.interp_bufferedio import W_BufferedIOBase from pypy.module._io.interp_iobase import convert_size +from pypy.objspace.std.memoryview import W_MemoryView import sys @@ -121,7 +122,7 @@ return space.wrap(size) def getbuffer_w(self, space): - return space.wrap(BytesIOBuffer(self)) + return space.wrap(W_MemoryView(BytesIOBuffer(self))) def getvalue_w(self, space): self._check_closed(space) diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -37,5 +37,4 @@ class CBuffer(CBufferMixin, buffer.Buffer): def __del__(self): - self.enqueue_for_destruction(self.space, CBufferMixin.destructor, - 'internal __del__ of ') + CBufferMixin.destructor(self) From noreply at buildbot.pypy.org Wed Mar 19 05:25:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 05:25:28 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_marshal Message-ID: <20140319042528.B26181D2808@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70077:e9a9d862dd67 Date: 2014-03-19 00:24 -0400 http://bitbucket.org/pypy/pypy/changeset/e9a9d862dd67/ Log: fix test_marshal diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -7,7 +7,7 @@ Py_MARSHAL_VERSION = 2 - at unwrap_spec(w_version = WrappedDefault(Py_MARSHAL_VERSION)) + at unwrap_spec(w_version=WrappedDefault(Py_MARSHAL_VERSION)) def dump(space, w_data, w_f, w_version): """Write the 'data' object into the open file 'f'.""" # special case real files for performance @@ -24,7 +24,7 @@ finally: writer.finished() - at unwrap_spec(w_version = WrappedDefault(Py_MARSHAL_VERSION)) + at unwrap_spec(w_version=WrappedDefault(Py_MARSHAL_VERSION)) def dumps(space, w_data, w_version): """Return the string that would have been written to a file by dump(data, file).""" @@ -217,6 +217,16 @@ self.space.marshal_w(w_obj, self) def dump_w_obj(self, w_obj): + space = self.space + if space.type(w_obj).is_heaptype(): + try: + buf = space.buffer_w(w_obj) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + self.raise_exc("unmarshallable object") + else: + w_obj = space.newbuffer(buf) try: self.put_w_obj(w_obj) except rstackovf.StackOverflow: diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -173,10 +173,13 @@ import marshal types = (float, complex, int, long, tuple, list, dict, set, frozenset) for cls in types: + print cls class subtype(cls): pass exc = raises(ValueError, marshal.dumps, subtype) assert str(exc.value) == 'unmarshallable object' + exc = raises(ValueError, marshal.dumps, subtype()) + assert str(exc.value) == 'unmarshallable object' def test_valid_subtypes(self): import marshal From noreply at buildbot.pypy.org Wed Mar 19 05:58:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 
2014 05:58:00 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill __builtin__.buffer, adjust test Message-ID: <20140319045800.EA9081C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70078:0f937ae8b835 Date: 2014-03-18 20:08 -0700 http://bitbucket.org/pypy/pypy/changeset/0f937ae8b835/ Log: kill __builtin__.buffer, adjust test diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -26,15 +26,11 @@ cls.w_safe_runtimerror = cls.space.wrap(sys.version_info < (2, 6)) def test_builtin_names(self): - import __builtin__ - assert __builtin__.None is None - assert __builtin__.False is False - assert __builtin__.True is True - - assert __builtin__.buffer is buffer - assert __builtin__.bytes is str + import builtins as __builtin__ + assert __builtin__.bytes is bytes assert __builtin__.dict is dict assert __builtin__.memoryview is memoryview + assert not hasattr(__builtin__, 'buffer') def test_bytes_alias(self): assert bytes is not str diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -61,7 +61,7 @@ from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject - from pypy.objspace.std.memoryview import W_Buffer, W_MemoryView + from pypy.objspace.std.memoryview import W_MemoryView import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods @@ -80,7 +80,6 @@ self.pythontypes.append(intobject.W_IntObject.typedef) self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) - self.pythontypes.append(W_Buffer.typedef) self.pythontypes.append(W_MemoryView.typedef) # the set of implementation types From noreply at buildbot.pypy.org Wed Mar 19 05:58:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 2014 05:58:02 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to buffer refactor: not everything exposes a __buffer__ now Message-ID: <20140319045802.534A91C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70079:aaba1a61bc19 Date: 2014-03-18 21:18 -0700 http://bitbucket.org/pypy/pypy/changeset/aaba1a61bc19/ Log: adapt to buffer refactor: not everything exposes a __buffer__ now diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -40,16 +40,19 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] - if space.lookup(w_initializer, '__buffer__') is not None: - if isinstance(w_initializer, W_ArrayBase): - a.extend(w_initializer, True) - else: - a.descr_frombytes(space, - space.bufferstr_w(w_initializer)) + if isinstance(w_initializer, W_ArrayBase): + a.extend(w_initializer, True) elif space.type(w_initializer) is space.w_list: a.descr_fromlist(space, w_initializer) else: - a.extend(w_initializer, True) + try: + buf = space.bufferstr_w(w_initializer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + a.extend(w_initializer, True) + else: + a.descr_frombytes(space, buf) break else: msg = 'bad typecode (must be b, B, u, h, H, i, I, l, L, f or d)' From noreply at buildbot.pypy.org Wed Mar 19 05:58:03 2014 From: noreply at 
buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 2014 05:58:03 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill W_Buffer Message-ID: <20140319045803.B52B71C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70080:415ae19bb4d1 Date: 2014-03-18 21:56 -0700 http://bitbucket.org/pypy/pypy/changeset/415ae19bb4d1/ Log: kill W_Buffer diff --git a/pypy/objspace/std/memoryview.py b/pypy/objspace/std/memoryview.py --- a/pypy/objspace/std/memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -31,141 +31,6 @@ " slicing with a step")) -class W_Buffer(W_Root): - """Implement the built-in 'buffer' type as a wrapper around - an interp-level buffer. - """ - - def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) - self.buf = buf - - def buffer_w(self, space): - return self.buf - - @staticmethod - @unwrap_spec(offset=int, size=int) - def descr_new_buffer(space, w_subtype, w_object, offset=0, size=-1): - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - buf = StringBuffer(builder.build()) - else: - buf = space.buffer_w(w_object) - - if offset == 0 and size == -1: - return W_Buffer(buf) - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buf, buffer.RWBuffer): - buf = buffer.RWSubBuffer(buf, offset, size) - else: - buf = buffer.SubBuffer(buf, offset, size) - return W_Buffer(buf) - - def descr_len(self, space): - return space.wrap(self.buf.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) - if step == 0: # index only - return space.wrapbytes(self.buf.getitem(start)) - res = self.buf.getslice(start, stop, step, size) - return space.wrapbytes(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self.buf, buffer.RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - _buffer_setitem(space, self.buf, w_index, newstring) - - def descr_str(self, space): - return space.wrap(self.buf.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrapbytes(self.buf.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, W_Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.buf.as_str() - str2 = w_other.buf.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.buf.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrapbytes(self.buf.as_str()) - # use the __mul__ 
method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self.buf, buffer.RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.buf.getlength())) - -W_Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). -""", - __new__ = interp2app(W_Buffer.descr_new_buffer), - __len__ = interp2app(W_Buffer.descr_len), - __getitem__ = interp2app(W_Buffer.descr_getitem), - __setitem__ = interp2app(W_Buffer.descr_setitem), - __str__ = interp2app(W_Buffer.descr_str), - __add__ = interp2app(W_Buffer.descr_add), - __eq__ = interp2app(W_Buffer.descr_eq), - __ne__ = interp2app(W_Buffer.descr_ne), - __lt__ = interp2app(W_Buffer.descr_lt), - __le__ = interp2app(W_Buffer.descr_le), - __gt__ = interp2app(W_Buffer.descr_gt), - __ge__ = interp2app(W_Buffer.descr_ge), - __hash__ = interp2app(W_Buffer.descr_hash), - __mul__ = interp2app(W_Buffer.descr_mul), - __rmul__ = interp2app(W_Buffer.descr_mul), - __repr__ = interp2app(W_Buffer.descr_repr), -) -W_Buffer.typedef.acceptable_as_base_class = False - - class W_MemoryView(W_Root): """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -25,7 +25,7 @@ from pypy.objspace.std.iterobject import W_AbstractSeqIterObject from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.longobject import W_LongObject, newlong -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.objectobject import W_ObjectObject from pypy.objspace.std.iterobject import W_SeqIterObject @@ -341,7 +341,7 @@ return W_SeqIterObject(w_obj) def newbuffer(self, w_obj): - return W_Buffer(w_obj) + return W_MemoryView(w_obj) def type(self, w_obj): jit.promote(w_obj.__class__) From noreply at buildbot.pypy.org Wed Mar 19 05:58:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 2014 05:58:05 +0100 (CET) Subject: [pypy-commit] pypy py3k: try to get away away with this simply subclassing MemoryView Message-ID: <20140319045805.173641C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70081:8b3f43ab9322 Date: 2014-03-18 21:57 -0700 http://bitbucket.org/pypy/pypy/changeset/8b3f43ab9322/ Log: try to get away away with this simply subclassing MemoryView diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -4,7 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import W_Buffer +from pypy.objspace.std.memoryview import W_MemoryView from rpython.rtyper.annlowlevel 
import llstr from rpython.rtyper.lltypesystem import rffi @@ -42,9 +42,9 @@ # Override the typedef to narrow down the interface that's exposed to app-level -class MiniBuffer(W_Buffer): +class MiniBuffer(W_MemoryView): def __init__(self, buffer, keepalive=None): - W_Buffer.__init__(self, buffer) + W_MemoryView.__init__(self, buffer) self.keepalive = keepalive MiniBuffer.typedef = TypeDef( From noreply at buildbot.pypy.org Wed Mar 19 06:11:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 2014 06:11:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140319051133.BA69D1C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70082:1cf13b13b4a1 Date: 2014-03-18 22:08 -0700 http://bitbucket.org/pypy/pypy/changeset/1cf13b13b4a1/ Log: merge default diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -194,6 +194,11 @@ return None def buffer_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.buffer_w(space) self._typed_unwrap_error(space, "buffer") def bytes_w(self, space): diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,7 +1,6 @@ """ Buffer protocol support. """ -from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import import_from_mixin @@ -71,8 +70,8 @@ assert 0 <= start <= stop return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) +# ____________________________________________________________ -# ____________________________________________________________ class SubBufferMixin(object): _attrs_ = ['buffer', 'offset', 'size'] @@ -98,7 +97,8 @@ if start == stop: return '' # otherwise, adding self.offset might make them # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) + return self.buffer.getslice(self.offset + start, self.offset + stop, + step, size) class SubBuffer(Buffer): diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.rstring import UnicodeBuilder, StringBuilder from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib import jit def create_builder(name, strtype, builder_cls): diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,4 +1,3 @@ -from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -80,7 +80,6 @@ return (cfield.ctype, cfield.offset) def _copy_from_same(self, cdata, w_ob): - space = self.space if isinstance(w_ob, cdataobj.W_CData): if w_ob.ctype is self and self.size >= 0: misc._raw_memcopy(w_ob._cdata, cdata, self.size) diff --git 
a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,4 +1,3 @@ -import weakref from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here, specialize -from rpython.rlib.rarithmetic import r_uint, r_ulonglong, is_signed_integer_type +from rpython.rlib.rarithmetic import r_uint, r_ulonglong from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -1,4 +1,3 @@ - """ The ffi for rpython, need to be imported for side effects """ @@ -8,8 +7,6 @@ from rpython.rtyper.extfunc import register_external from pypy.module._minimal_curses import interp_curses from rpython.translator.tool.cbuild import ExternalCompilationInfo -from sys import platform -import os.path # We cannot trust ncurses5-config, it's broken in various ways in # various versions. For example it might not list -ltinfo even though diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -3,7 +3,6 @@ from pypy.interpreter.pycode import PyCode from pypy.interpreter.function import Function, Method from pypy.interpreter.module import Module -from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter.generator import GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -1,13 +1,10 @@ import sys -import math from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize from rpython.rlib import rfloat, runicode from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter import unicodehelper -from rpython.rtyper.annlowlevel import llstr, hlunicode OVF_DIGITS = len(str(sys.maxint)) @@ -30,7 +27,7 @@ Internally it's implemented at the level of low-level helpers, to avoid the extra copy we would need if we take the actual slice first. - + No bound checking is done, use carefully. 
""" from rpython.rtyper.annlowlevel import llstr, hlunicode @@ -226,7 +223,6 @@ def decode_array(self, i): w_list = self.space.newlist([]) start = i - count = 0 i = self.skip_whitespace(start) if self.ll_chars[i] == ']': self.pos = i+1 diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr +from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, unwrap_value, unpack_argshapes, got_libffi_error) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -15,7 +15,7 @@ from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib import clibffi, rgc -from rpython.rlib.rarithmetic import intmask, signedtype, widen, r_uint, \ +from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -1,4 +1,3 @@ - """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. Used for debugging ctypes """ diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1,25 +1,8 @@ import sys -import py -import py.test - - -## class AppTestSimpleArray: -## spaceconfig = dict(usemodules=('array',)) -## def setup_class(cls): -## cls.w_simple_array = cls.space.appexec([], """(): -## import array -## return array.simple_array -## """) - -## def test_simple(self): -## a = self.simple_array(10) -## a[5] = 7.42 -## assert a[5] == 7.42 +import pytest class BaseArrayTests: - - def test_ctor(self): assert len(self.array('i')) == 0 @@ -545,7 +528,6 @@ assert not a > 2*a assert not a >= 2*a - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -779,7 +761,6 @@ assert img[3, 25] == 3 * 9 - def test_override_from(self): class mya(self.array): def fromlist(self, lst): @@ -862,41 +843,41 @@ def test_assign_object_with_special_methods(self): from array import array - + class Num(object): def __float__(self): return 5.25 - + def __int__(self): return 7 - + class NotNum(object): pass - + class Silly(object): def __float__(self): return None - + def __int__(self): - return None + return None class OldNum: def __float__(self): return 6.25 - + def __int__(self): return 8 - + class OldNotNum: pass - + class OldSilly: def __float__(self): return None - + def __int__(self): return None - + for tc in 'bBhHiIlL': a = array(tc, [0]) raises(TypeError, a.__setitem__, 0, 1.0) @@ -914,7 +895,7 @@ a = array(tc, [0]) a[0] = 1.0 a[0] = 1 - a[0] = Num() + a[0] = Num() assert a[0] == 5.25 raises(TypeError, a.__setitem__, NotNum()) a[0] = OldNum() @@ -922,11 +903,15 @@ raises(TypeError, a.__setitem__, OldNotNum()) raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) - + a = 
array('u', 'hi') a[0] = 'b' assert a[0] == 'b' - + + a = array('u', u'hi') + a[0] = u'b' + assert a[0] == u'b' + def test_bytearray(self): a = self.array('u', 'hi') b = self.array('u') @@ -940,15 +925,13 @@ assert repr(a) == "array('u', {!r})".format(s) assert eval(repr(a), {'array': self.array}) == a - class DontTestCPythonsOwnArray(BaseArrayTests): - def setup_class(cls): import array cls.array = array.array import struct cls.struct = struct - cls.tempfile = str(py.test.ensuretemp('array').join('tmpfile')) + cls.tempfile = str(pytest.ensuretemp('array').join('tmpfile')) cls.maxint = sys.maxint @@ -961,7 +944,7 @@ return array.array """) cls.w_tempfile = cls.space.wrap( - str(py.test.ensuretemp('array').join('tmpfile'))) + str(pytest.ensuretemp('array').join('tmpfile'))) cls.w_maxint = cls.space.wrap(sys.maxint) def test_buffer_info(self): @@ -1028,11 +1011,11 @@ def test_getitem_only_ints(self): class MyInt(object): - def __init__(self, x): - self.x = x + def __init__(self, x): + self.x = x - def __int__(self): - return self.x + def __int__(self): + return self.x a = self.array('i', [1, 2, 3, 4, 5, 6]) raises(TypeError, "a[MyInt(0)]") diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,7 +1,5 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -import py -import sys class AppTestArrayModule(AppTestCpythonExtensionBase): enable_leak_checking = False @@ -21,7 +19,7 @@ module = self.import_module(name='array') arr = module.array('i', [1,2,3]) sum = 0 - for i in arr: + for i in arr: sum += i assert sum == 6 @@ -60,4 +58,3 @@ b'\x02\0\0\0' b'\x03\0\0\0' b'\x04\0\0\0') - diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -6,7 +6,7 @@ Py_MARSHAL_VERSION = 2 - at unwrap_spec(w_version = WrappedDefault(Py_MARSHAL_VERSION)) + at unwrap_spec(w_version=WrappedDefault(Py_MARSHAL_VERSION)) def dump(space, w_data, w_f, w_version): """Write the 'data' object into the open file 'f'.""" # XXX: before py3k, we special-cased W_File to use a more performant @@ -22,7 +22,7 @@ finally: writer.finished() - at unwrap_spec(w_version = WrappedDefault(Py_MARSHAL_VERSION)) + at unwrap_spec(w_version=WrappedDefault(Py_MARSHAL_VERSION)) def dumps(space, w_data, w_version): """Return the string that would have been written to a file by dump(data, file).""" @@ -221,10 +221,15 @@ def dump_w_obj(self, w_obj): space = self.space - if (space.type(w_obj).is_heaptype() and - space.lookup(w_obj, "__buffer__") is None): - w_err = space.wrap("only builtins can be marshaled") - raise OperationError(space.w_ValueError, w_err) + if space.type(w_obj).is_heaptype(): + try: + buf = space.buffer_w(w_obj) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + self.raise_exc("unmarshallable object") + else: + w_obj = space.newbuffer(buf) try: self.put_w_obj(w_obj) except rstackovf.StackOverflow: diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -2,6 +2,8 @@ class AppTestMarshal: + spaceconfig = {'usemodules': ['array']} + def setup_class(cls): tmpfile = udir.join('AppTestMarshal.tmp') cls.w_tmpfile = cls.space.wrap(str(tmpfile)) @@ -171,9 +173,20 @@ import 
marshal types = (float, complex, int, tuple, list, dict, set, frozenset) for cls in types: + print cls class subtype(cls): pass - raises(ValueError, marshal.dumps, subtype) + exc = raises(ValueError, marshal.dumps, subtype) + assert str(exc.value) == 'unmarshallable object' + exc = raises(ValueError, marshal.dumps, subtype()) + assert str(exc.value) == 'unmarshallable object' + + def test_valid_subtypes(self): + import marshal + from array import array + class subtype(array): + pass + assert marshal.dumps(subtype('c', 'test')) == marshal.dumps(array('c', 'test')) def test_bad_typecode(self): import marshal @@ -188,7 +201,8 @@ class AppTestSmallLong(AppTestMarshal): - spaceconfig = {"objspace.std.withsmalllong": True} + spaceconfig = AppTestMarshal.spaceconfig.copy() + spaceconfig["objspace.std.withsmalllong"] = True def setup_class(cls): from pypy.interpreter import gateway diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import lltype -from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask +from rpython.rlib.rarithmetic import intmask from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo import os diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -2,7 +2,6 @@ Version numbers exposed by PyPy through the 'sys' module. """ import os -import re from rpython.translator.platform import platform from pypy.interpreter import gateway diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -11,7 +11,6 @@ """ from pypy.interpreter import function -from pypy.objspace.descroperation import object_getattribute from rpython.rlib import jit from pypy.objspace.std.mapdict import LOOKUP_METHOD_mapdict, \ LOOKUP_METHOD_mapdict_fill_cache_method @@ -36,7 +35,6 @@ if space.config.objspace.std.withmapdict and not jit.we_are_jitted(): # mapdict has an extra-fast version of this function - from pypy.objspace.std.mapdict import LOOKUP_METHOD_mapdict if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return @@ -79,7 +77,7 @@ n_kwargs = (oparg >> 8) & 0xff w_self = f.peekvalue(n_args + (2 * n_kwargs)) n = n_args + (w_self is not None) - + if not n_kwargs: w_callable = f.peekvalue(n_args + (2 * n_kwargs) + 1) try: @@ -98,7 +96,7 @@ key = f.space.str_w(w_key) keywords[n_kwargs] = key keywords_w[n_kwargs] = w_value - + arguments = f.popvalues(n) # includes w_self if it is not None args = f.argument_factory(arguments, keywords, keywords_w, None, None) if w_self is None: diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -140,7 +140,7 @@ for key, cell in iterator()] def clear(self, w_dict): - iterator = self.unerase(w_dict.dstorage).clear() + self.unerase(w_dict.dstorage).clear() self.mutated() def popitem(self, w_dict): diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -1,4 +1,5 @@ -from pypy.interpreter import gateway +import math + from pypy.interpreter.error import OperationError from 
pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_IntObject @@ -12,8 +13,6 @@ from rpython.rlib import jit, rcomplex from rpython.rlib.rarithmetic import intmask, r_ulonglong -import math - HASH_IMAG = 1000003 diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef from pypy.objspace.std.stdtypedef import StdObjSpaceMultiMethod from pypy.objspace.std.unicodeobject import unicode_to_decimal_w diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1092,7 +1092,7 @@ w_clone.setup_iterator() # spool until we have the same pos while w_clone.pos < self.pos: - w_obj = w_clone.next_entry() + w_clone.next_entry() w_clone.pos += 1 stuff = [w_clone.next_entry() for i in range(w_clone.pos, w_clone.len)] w_res = space.newlist(stuff) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -1,7 +1,6 @@ -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.dictmultiobject import W_DictMultiObject, create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy +#from pypy.objspace.std.model import registerimplementation, W_Object +#from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.dictmultiobject import DictStrategy, create_iterator_classes from pypy.objspace.std.typeobject import unwrap_cell from pypy.interpreter.error import OperationError, oefmt @@ -9,7 +8,6 @@ class DictProxyStrategy(DictStrategy): - erase, unerase = rerased.new_erasing_pair("dictproxy") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -52,7 +50,6 @@ w_type.dict_w[key] = w_value def setdefault(self, w_dict, w_key, w_default): - space = self.space w_result = self.getitem(w_dict, w_key) if w_result is not None: return w_result diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -1,14 +1,15 @@ +import math import operator from pypy.interpreter.error import OperationError, oefmt -from pypy.objspace.std import model, newformat +from pypy.objspace.std import newformat from pypy.objspace.std.floattype import float_typedef, W_AbstractFloatObject from pypy.objspace.std.intobject import HASH_BITS, HASH_MODULUS from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.longobject import W_LongObject, newlong_from_float from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.longobject import W_LongObject, newlong_from_float from rpython.rlib.rarithmetic import ( LONG_BIT, intmask, ovfcheck_float_to_int, r_uint) from rpython.rlib.rfloat import ( @@ -18,8 +19,6 @@ from rpython.rlib import rfloat from 
rpython.tool.sourcetools import func_with_new_name - -import math from pypy.objspace.std.intobject import W_IntObject HASH_INF = 314159 diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -5,12 +5,11 @@ translation this module uses rarithmetic.ovfcheck to explicitly check for overflows, something CPython does not do anymore. """ - import operator import sys from rpython.rlib import jit -from rpython.rlib.objectmodel import instantiate, import_from_mixin, specialize +from rpython.rlib.objectmodel import instantiate from rpython.rlib.rarithmetic import ( LONG_BIT, intmask, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) @@ -23,7 +22,6 @@ from pypy.interpreter import typedef from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, applevel, interp2app, interpindirect2app, unwrap_spec) @@ -32,7 +30,6 @@ BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) from pypy.objspace.std.stdtypedef import StdTypeDef - SENTINEL = object() HASH_BITS = 61 if sys.maxsize > 2 ** 31 - 1 else 31 @@ -40,7 +37,6 @@ class W_AbstractIntObject(W_Root): - __slots__ = () def is_w(self, space, w_other): diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -10,7 +10,7 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std.register_all import register_all -from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_uint, intmask +from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_uint from pypy.objspace.std import model from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.interpreter.special import Ellipsis @@ -200,7 +200,6 @@ register(TYPE_BINARY_COMPLEX, unmarshal_Complex_bin) def marshal_w__Long(space, w_long, m): - from rpython.rlib.rbigint import rbigint from rpython.rlib.rarithmetic import r_ulonglong m.start(TYPE_LONG) SHIFT = 15 @@ -359,7 +358,6 @@ lng = u.atom_lng(tc) res = [None] * lng idx = 0 - space = u.space while idx < lng: res[idx] = unmarshal_str(u) idx += 1 diff --git a/pypy/objspace/std/multimethod.py b/pypy/objspace/std/multimethod.py --- a/pypy/objspace/std/multimethod.py +++ b/pypy/objspace/std/multimethod.py @@ -866,7 +866,6 @@ entryarray = CompressedArray(null_entry) indexarray = self.mrdtable.indexarray lst = self.mrdtable.list_of_types - indexline = [] def compress(typesprefix, typesnum): if len(typesprefix) == self.multimethod.arity: diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -466,7 +466,6 @@ if not got_align: self._align = "=" i += 1 - start_i = i self._width, i = _parse_int(self.space, spec, i, length) if length != i and spec[i] == ",": self._thousands_sep = True @@ -584,7 +583,6 @@ return space.wrap(self._pad(string)) def _get_locale(self, tp): - space = self.space if tp == "n": dec, thousands, grouping = rlocale.numeric_formatting() elif self._thousands_sep: @@ -681,12 +679,10 @@ grouping = self._loc_grouping min_width = spec.n_min_width grouping_state = 0 - count = 0 left = spec.n_digits n_ts = len(self._loc_thousands) need_separator = False done = False - groupings = len(grouping) previous = 0 while True: group = ord(grouping[grouping_state]) diff --git 
a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -331,7 +331,6 @@ strdict=strdict, kwargs=kwargs) def newset(self): - from pypy.objspace.std.setobject import newset return W_SetObject(self, None) def newslice(self, w_start, w_end, w_step): diff --git a/pypy/objspace/std/proxy_helpers.py b/pypy/objspace/std/proxy_helpers.py --- a/pypy/objspace/std/proxy_helpers.py +++ b/pypy/objspace/std/proxy_helpers.py @@ -1,10 +1,8 @@ - """ Some transparent helpers, put here because of cyclic imports """ -from pypy.objspace.std.model import W_ANY, W_Object -from pypy.interpreter import baseobjspace +from pypy.objspace.std.model import W_ANY from pypy.interpreter.argument import Arguments from rpython.tool.sourcetools import func_with_new_name @@ -24,7 +22,7 @@ def function(space, w_transparent_list, __args__): args = __args__.prepend(space.wrap(op_name)) return space.call_args(w_transparent_list.w_controller, args) - + function = func_with_new_name(function, mm.name) mm.register(function, type_) @@ -32,14 +30,14 @@ def function(space, w_transparent_list, *args_w): args = Arguments(space, [space.wrap(op_name)] + list(args_w[:-1]) + args_w[-1]) return space.call_args(w_transparent_list.w_controller, args) - + function = func_with_new_name(function, mm.name) mm.register(function, type_, *([W_ANY] * (mm.arity - 1))) def install_mm_trampoline(type_, mm, is_local): classname = type_.__name__[2:] mm_name, op_name = create_mm_names(classname, mm, is_local) - + if ['__args__'] == mm.argnames_after: return install_general_args_trampoline(type_, mm, is_local, op_name) if ['args_w'] == mm.argnames_after: @@ -58,10 +56,10 @@ """ if mm.arity != 2: return False - + if len(mm.specialnames) != 2: return False - + # search over the signatures for signature in mm.signatures(): if signature == (type_.original, type_.original): @@ -69,21 +67,21 @@ return False def install_mm_special(type_, mm, is_local): - classname = type_.__name__[2:] + #classname = type_.__name__[2:] #mm_name, op_name = create_mm_names(classname, mm, is_local) - + def function(space, w_any, w_transparent_list): retval = space.call_function(w_transparent_list.w_controller, space.wrap(mm.specialnames[1]), w_any) return retval - + function = func_with_new_name(function, mm.specialnames[0]) - + mm.register(function, type_.typedef.any, type_) def register_type(type_): from pypy.objspace.std.stdtypedef import multimethods_defined_on - + for mm, is_local in multimethods_defined_on(type_.original): if not mm.name.startswith('__'): install_mm_trampoline(type_, mm, is_local) diff --git a/pypy/objspace/std/proxyobject.py b/pypy/objspace/std/proxyobject.py --- a/pypy/objspace/std/proxyobject.py +++ b/pypy/objspace/std/proxyobject.py @@ -1,15 +1,8 @@ - """ transparent list implementation """ - -from pypy.objspace.std.model import W_Object from pypy.interpreter.error import OperationError from pypy.interpreter import baseobjspace -#class W_Transparent(W_Object): -# def __init__(self, w_controller): -# self.controller = w_controller - def transparent_class(name, BaseCls): class W_Transparent(BaseCls): @@ -72,25 +65,3 @@ return W_Transparent W_Transparent = transparent_class('W_Transparent', baseobjspace.W_Root) -#W_TransparentObject = transparent_class('W_TransparentObject', W_Object) - -#from pypy.objspace.std.objecttype import object_typedef -#W_TransparentObject.typedef = object_typedef - -from pypy.interpreter.typedef import Function, GeneratorIterator, PyTraceback, \ - 
PyFrame, PyCode - -class W_TransparentFunction(W_Transparent): - typedef = Function.typedef - -class W_TransparentTraceback(W_Transparent): - typedef = PyTraceback.typedef - -class W_TransparentCode(W_Transparent): - typedef = PyCode.typedef - -class W_TransparentFrame(W_Transparent): - typedef = PyFrame.typedef - -class W_TransparentGenerator(W_Transparent): - typedef = GeneratorIterator.typedef diff --git a/pypy/objspace/std/slicetype.py b/pypy/objspace/std/slicetype.py --- a/pypy/objspace/std/slicetype.py +++ b/pypy/objspace/std/slicetype.py @@ -1,4 +1,4 @@ -from pypy.interpreter import baseobjspace, gateway +from pypy.interpreter import gateway from pypy.interpreter.typedef import GetSetProperty from pypy.objspace.std.stdtypedef import StdTypeDef, SMM from pypy.objspace.std.register_all import register_all diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -1,8 +1,7 @@ -from pypy.interpreter import gateway, baseobjspace, argument +from pypy.interpreter import gateway, baseobjspace from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.typedef import TypeDef, GetSetProperty, Member -from pypy.interpreter.typedef import descr_get_dict, descr_set_dict -from pypy.interpreter.typedef import descr_del_dict +from pypy.interpreter.typedef import TypeDef, GetSetProperty, \ + descr_get_dict, descr_set_dict, descr_del_dict from pypy.interpreter.baseobjspace import SpaceCache from pypy.objspace.std import model from pypy.objspace.std.model import StdObjSpaceMultiMethod diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -446,7 +446,6 @@ def descr_split(self, space, w_sep=None, maxsplit=-1): res = [] value = self._val(space) - length = len(value) if space.is_none(w_sep): res = split(value, maxsplit=maxsplit) return self._newlist_unwrapped(space, res) diff --git a/pypy/objspace/std/transparent.py b/pypy/objspace/std/transparent.py --- a/pypy/objspace/std/transparent.py +++ b/pypy/objspace/std/transparent.py @@ -1,14 +1,30 @@ - """ transparent.py - Several transparent proxy helpers """ - from pypy.interpreter import gateway from pypy.interpreter.error import OperationError, oefmt -from pypy.objspace.std.proxyobject import * +from pypy.interpreter.typedef import Function, GeneratorIterator, PyTraceback, \ + PyFrame, PyCode +from pypy.objspace.std.proxyobject import W_Transparent from pypy.objspace.std.typeobject import W_TypeObject -from rpython.rlib.objectmodel import r_dict from rpython.rlib.unroll import unrolling_iterable + +class W_TransparentFunction(W_Transparent): + typedef = Function.typedef + +class W_TransparentTraceback(W_Transparent): + typedef = PyTraceback.typedef + +class W_TransparentCode(W_Transparent): + typedef = PyCode.typedef + +class W_TransparentFrame(W_Transparent): + typedef = PyFrame.typedef + +class W_TransparentGenerator(W_Transparent): + typedef = GeneratorIterator.typedef + + class TypeCache(object): def __init__(self): self.cache = [] @@ -28,13 +44,10 @@ space.wrap(app_proxy_controller)) - def proxy(space, w_type, w_controller): """tproxy(typ, controller) -> obj Return something that looks like it is of type typ. 
Its behaviour is completely controlled by the controller.""" - from pypy.interpreter.typedef import Function, PyTraceback, PyFrame, \ - PyCode, GeneratorIterator if not space.is_true(space.callable(w_controller)): raise OperationError(space.w_TypeError, space.wrap("controller should be function")) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -2,12 +2,12 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.function import Function, StaticMethod -from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ +from pypy.interpreter.typedef import weakref_descr, GetSetProperty, Member, \ descr_get_dict from pypy.interpreter.astcompiler.misc import mangle from pypy.objspace.std.model import W_Object from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member +from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -4,12 +4,12 @@ """ import os +from rpython.rlib import rposix +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform as platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rlib.rarithmetic import r_uint, intmask -from rpython.rlib import rposix -from rpython.rlib.rstring import StringBuilder includes = ['stdio.h', 'sys/types.h'] if os.name == "posix": diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -170,7 +170,7 @@ cfile.write(r''' #include #include - #include + #include #ifdef __GNUC__ #define _dosmaperr mingw_dosmaperr #endif @@ -197,6 +197,7 @@ standalone=True) except (CompilationError, WindowsError): # Fallback for the mingw32 compiler + assert static_platform.name == 'mingw32' errors = { 2: 2, 3: 2, 4: 24, 5: 13, 6: 9, 7: 12, 8: 12, 9: 12, 10: 7, 11: 8, 15: 2, 16: 13, 17: 18, 18: 2, 19: 13, 20: 13, 21: 13, From noreply at buildbot.pypy.org Wed Mar 19 07:26:20 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 19 Mar 2014 07:26:20 +0100 (CET) Subject: [pypy-commit] pypy default: Kill W_MemoryView.descr_buffer() and move the comment to W_MemoryView.buffer_w(). Message-ID: <20140319062620.72D6F1D26D3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r70083:f2302918495d Date: 2014-03-19 07:25 +0100 http://bitbucket.org/pypy/pypy/changeset/f2302918495d/ Log: Kill W_MemoryView.descr_buffer() and move the comment to W_MemoryView.buffer_w(). diff --git a/pypy/objspace/std/memoryview.py b/pypy/objspace/std/memoryview.py --- a/pypy/objspace/std/memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -176,6 +176,14 @@ self.buf = buf def buffer_w(self, space): + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). 
For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. + """ return self.buf @staticmethod @@ -229,17 +237,6 @@ buf = buffer.SubBuffer(buf, start, size) return W_MemoryView(buf) - def descr_buffer(self, space): - """ - Note that memoryview() is very inconsistent in CPython: it does not - support the buffer interface but does support the new buffer - interface: as a result, it is possible to pass memoryview to - e.g. socket.send() but not to file.write(). For simplicity and - consistency, in PyPy memoryview DOES support buffer(), which means - that it is accepted in more places than CPython. - """ - return space.wrap(self.buf) - def descr_tobytes(self, space): return space.wrap(self.as_str()) From noreply at buildbot.pypy.org Wed Mar 19 07:28:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 07:28:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: intermediate Message-ID: <20140319062815.CD4D91D26D3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1072:5b46d0dcbbfd Date: 2014-03-19 07:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/5b46d0dcbbfd/ Log: intermediate diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -14,7 +14,7 @@ #define THREAD_STARTS 300 // how many restarts of threads #define PREBUILT_ROOTS 3 #define MAXROOTS 1000 -#define FORKS 3 +#define FORKS 1 // SUPPORT struct node_s; @@ -339,9 +339,22 @@ if (p == (objptr_t)-1) { push_roots(); - stm_commit_transaction(); - if (arg) { + if (arg == NULL) { /* common case */ + stm_commit_transaction(); + td.num_roots_at_transaction_start = td.num_roots; + if (get_rand(100) < 98) { + STM_START_TRANSACTION(&stm_thread_local, here); + } else { + stm_start_inevitable_transaction(&stm_thread_local); + } + td.num_roots = td.num_roots_at_transaction_start; + p = NULL; + pop_roots(); + reload_roots(); + } + else { + /* run a fork() inside the transaction */ printf("========== FORK =========\n"); arg = NULL; pid_t child = fork(); @@ -355,19 +368,10 @@ num_forked_children++; else num_forked_children = 0; + + pop_roots(); + p = NULL; } - - td.num_roots_at_transaction_start = td.num_roots; - - if (get_rand(100) < 98) { - STM_START_TRANSACTION(&stm_thread_local, here); - } else { - stm_start_inevitable_transaction(&stm_thread_local); - } - td.num_roots = td.num_roots_at_transaction_start; - p = NULL; - pop_roots(); - reload_roots(); } } stm_commit_transaction(); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -164,11 +164,13 @@ STM_SEGMENT->transaction_read_version = 1; } -void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) +void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf, + int already_got_the_lock) { - assert(!_stm_in_transaction(tl)); - - s_mutex_lock(); + if (!already_got_the_lock) { + assert(!_stm_in_transaction(tl)); + s_mutex_lock(); + } retry: if (jmpbuf == NULL) { @@ -447,7 +449,7 @@ /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ } -void stm_commit_transaction(void) +void _stm_commit_transaction(int keep_the_lock_at_the_end) { assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); @@ -506,7 +508,8 @@ _finish_transaction(); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ - s_mutex_unlock(); + if (!keep_the_lock_at_the_end) + s_mutex_unlock(); } void stm_abort_transaction(void) diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -9,6 +9,7 @@ static char *fork_big_copy = NULL; static stm_thread_local_t *fork_this_tl; +static bool fork_was_in_transaction; static char *setup_mmap(char *reason); /* forward, in setup.c */ static void do_or_redo_setup_after_fork(void); /* forward, in setup.c */ @@ -21,23 +22,19 @@ if (stm_object_pages == NULL) return; - /* This assumes that fork() is not called from transactions. - So far we attempt to check this by walking all stm_thread_local_t, + /* So far we attempt to check this by walking all stm_thread_local_t, marking the one from the current thread, and verifying that it's not running a transaction. This assumes that the stm_thread_local_t is just a __thread variable, so never changes threads. */ s_mutex_lock(); - mutex_pages_lock(); - dprintf(("forksupport_prepare: synchronized all threads\n")); + dprintf(("forksupport_prepare\n")); fork_this_tl = NULL; stm_thread_local_t *tl = stm_all_thread_locals; do { if (pthread_equal(*_get_cpth(tl), pthread_self())) { - if (_stm_in_transaction(tl)) - stm_fatalerror("fork(): cannot be used inside a transaction"); if (fork_this_tl != NULL) stm_fatalerror("fork(): found several stm_thread_local_t" " from the same thread"); @@ -48,6 +45,24 @@ if (fork_this_tl == NULL) stm_fatalerror("fork(): found no stm_thread_local_t from this thread"); + s_mutex_unlock(); + + /* Run a commit without releasing the mutex at the end; if necessary, + actually start a dummy inevitable transaction for this + */ + fork_was_in_transaction = _stm_in_transaction(fork_this_tl); + if (!fork_was_in_transaction) + stm_start_inevitable_transaction(fork_this_tl); + _stm_commit_transaction(/*keep_the_lock_at_the_end =*/ 1); + + printf("fork_was_in_transaction: %d\n" + "fork_this_tl->associated_segment_num: %d\n", + (int)fork_was_in_transaction, + (int)fork_this_tl->associated_segment_num); + + /* Note that the commit can still fail and abort, which should be fine */ + + mutex_pages_lock(); /* Make a new mmap at some other address, but of the same size as the standard mmap at stm_object_pages @@ -92,6 +107,8 @@ assert(fork_big_copy == NULL); fork_big_copy = big_copy; + + assert(_has_mutex()); } static void forksupport_parent(void) @@ -99,6 +116,9 @@ if (stm_object_pages == NULL) return; + assert(_is_tl_registered(fork_this_tl)); + assert(_has_mutex()); + /* In the parent, after fork(), we can simply forget about the big copy that we made for the child. 
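The forksupport_prepare() / forksupport_parent() / forksupport_child() trio being patched here follows the usual pthread_atfork() protocol: the prepare handler runs in the parent just before fork(), and the other two run immediately after it in the parent and in the child respectively. The hunks in this mail do not show the registration itself (presumably done with pthread_atfork() inside setup_forksupport(), which is not part of this patch), so the following is only a minimal standalone sketch of that protocol and not stmgc code; the mutex and the handler bodies are invented for illustration:

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Illustrative only: take a process-wide lock in the prepare handler so
       that neither the parent nor the child resumes with it held by a
       thread that no longer exists on its side of the fork(). */
    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void prepare(void) { pthread_mutex_lock(&big_lock);   puts("prepare (before fork)"); }
    static void parent(void)  { pthread_mutex_unlock(&big_lock); puts("parent (after fork)");  }
    static void child(void)   { pthread_mutex_unlock(&big_lock); puts("child (after fork)");   }

    int main(void)
    {
        if (pthread_atfork(prepare, parent, child) != 0) {
            fprintf(stderr, "pthread_atfork failed\n");
            return 1;
        }
        pid_t pid = fork();
        if (pid == 0)
            _exit(0);              /* child: its handler has already run */
        waitpid(pid, NULL, 0);     /* parent continues normally */
        return 0;
    }

POSIX runs the prepare handlers in reverse order of registration and the parent/child handlers in registration order, which starts to matter once several subsystems hook fork() this way.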
*/ @@ -109,37 +129,32 @@ dprintf(("forksupport_parent: continuing to run\n")); mutex_pages_unlock(); - s_mutex_unlock(); + + printf("AFTER: fork_was_in_transaction: %d\n" + "fork_this_tl->associated_segment_num: %d\n", + (int)fork_was_in_transaction, + (int)fork_this_tl->associated_segment_num); + + if (fork_was_in_transaction) { + _stm_start_transaction(fork_this_tl, NULL, + /*already_got_the_lock =*/ 1); + } + else { + s_mutex_unlock(); + } } static void forksupport_child(void) { if (stm_object_pages == NULL) return; + abort(); - /* In the child, first unregister all other stm_thread_local_t, - mostly as a way to free the memory used by the shadowstacks - */ + /* this new process contains no other thread, so we can + just release these locks early */ mutex_pages_unlock(); s_mutex_unlock(); - assert(fork_this_tl != NULL); - while (stm_all_thread_locals->next != stm_all_thread_locals) { - if (stm_all_thread_locals == fork_this_tl) - stm_unregister_thread_local(stm_all_thread_locals->next); - else - stm_unregister_thread_local(stm_all_thread_locals); - } - assert(stm_all_thread_locals == fork_this_tl); - - /* Restore a few things in the child: the new pthread_self(), and - the %gs register (although I suppose it should be preserved by - fork()) - */ - *_get_cpth(fork_this_tl) = pthread_self(); - set_gs_register(get_segment_base(fork_this_tl->associated_segment_num)); - fork_this_tl = NULL; - /* Move the copy of the mmap over the old one, overwriting it and thus freeing the old mapping in this process */ @@ -152,6 +167,24 @@ stm_fatalerror("after fork: mremap failed: %m"); fork_big_copy = NULL; + /* Unregister all other stm_thread_local_t, mostly as a way to free + the memory used by the shadowstacks + */ + assert(fork_this_tl != NULL); + while (stm_all_thread_locals->next != stm_all_thread_locals) { + if (stm_all_thread_locals == fork_this_tl) + stm_unregister_thread_local(stm_all_thread_locals->next); + else + stm_unregister_thread_local(stm_all_thread_locals); + } + assert(stm_all_thread_locals == fork_this_tl); + + /* Restore a few things: the new pthread_self(), and the %gs + register (although I suppose it should be preserved by fork()) + */ + *_get_cpth(fork_this_tl) = pthread_self(); + set_gs_register(get_segment_base(fork_this_tl->associated_segment_num)); + /* Call a subset of stm_teardown() / stm_setup() to free and recreate the necessary data in all segments, and to clean up some of the global data like the big arrays that don't make sense any @@ -172,6 +205,11 @@ pages_initialize_shared(start, stop - start); mutex_pages_unlock(); + /* Now restart the transaction if needed + */ + if (fork_was_in_transaction) + stm_start_inevitable_transaction(fork_this_tl); + dprintf(("forksupport_child: running one thread now\n")); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -78,7 +78,8 @@ object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); -void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); +void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *, int); +void _stm_commit_transaction(int); void _stm_collectable_safe_point(void); /* for tests, but also used in duhton: */ @@ -257,17 +258,19 @@ stm_jmpbuf_t). 
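The child-side trick used in these patches (build a full snapshot of the shared mmap before fork(), then in the child move it over the inherited mapping with mremap(MREMAP_MAYMOVE | MREMAP_FIXED), as in the "Move the copy of the mmap over the old one" hunk) reduces to the standalone sketch below. It is not the stmgc code itself; the sizes and string contents are invented and error handling is kept minimal:

    #define _GNU_SOURCE           /* for mremap() and MREMAP_FIXED */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t size = 16 * 4096;

        /* "orig" stands in for the big shared mapping; "snapshot" for the
           copy taken before fork(). */
        char *orig = mmap(NULL, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *snapshot = mmap(NULL, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(orig != MAP_FAILED && snapshot != MAP_FAILED);

        strcpy(orig, "state that must not be kept");
        strcpy(snapshot, "state copied before fork()");

        /* Move the snapshot over the original address range: the old pages
           are discarded and the copy now lives at the original address. */
        void *res = mremap(snapshot, size, size,
                           MREMAP_MAYMOVE | MREMAP_FIXED, orig);
        assert(res == orig);

        printf("%s\n", orig);     /* prints the snapshot's contents */
        return 0;
    }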
*/ #define STM_START_TRANSACTION(tl, jmpbuf) ({ \ while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } \ - _stm_start_transaction(tl, &jmpbuf); \ + _stm_start_transaction(tl, &jmpbuf, 0); \ }) /* Start an inevitable transaction, if it's going to return from the current function immediately. */ static inline void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - _stm_start_transaction(tl, NULL); + _stm_start_transaction(tl, NULL, 0); } /* Commit a transaction. */ -void stm_commit_transaction(void); +static inline void stm_commit_transaction(void) { + _stm_commit_transaction(0); +} /* Abort the currently running transaction. */ void stm_abort_transaction(void) __attribute__((noreturn)); From noreply at buildbot.pypy.org Wed Mar 19 07:34:20 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 19 Mar 2014 07:34:20 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix translation. Message-ID: <20140319063420.9B0F21D2800@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r70084:34feb354a1da Date: 2014-03-19 07:33 +0100 http://bitbucket.org/pypy/pypy/changeset/34feb354a1da/ Log: Fix translation. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -197,7 +197,7 @@ w_impl = space.lookup(self, '__buffer__') if w_impl is not None: w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_buffer): + if space.isinstance_w(w_result, space.w_memoryview): return w_result.buffer_w(space) self._typed_unwrap_error(space, "buffer") From noreply at buildbot.pypy.org Wed Mar 19 08:48:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 08:48:46 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: ... Message-ID: <20140319074846.D33A61C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1073:769b3113998d Date: 2014-03-19 08:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/769b3113998d/ Log: ... diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -4,7 +4,7 @@ /* XXX this is currently not doing copy-on-write, but simply forces a - copy of all shared pages as soon as fork() is called. */ + copy of all pages as soon as fork() is called. 
*/ static char *fork_big_copy = NULL; @@ -12,8 +12,6 @@ static bool fork_was_in_transaction; static char *setup_mmap(char *reason); /* forward, in setup.c */ -static void do_or_redo_setup_after_fork(void); /* forward, in setup.c */ -static void do_or_redo_teardown_after_fork(void); /* forward, in setup.c */ static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ @@ -31,37 +29,34 @@ dprintf(("forksupport_prepare\n")); - fork_this_tl = NULL; + stm_thread_local_t *this_tl = NULL; stm_thread_local_t *tl = stm_all_thread_locals; do { if (pthread_equal(*_get_cpth(tl), pthread_self())) { - if (fork_this_tl != NULL) + if (this_tl != NULL) stm_fatalerror("fork(): found several stm_thread_local_t" " from the same thread"); - fork_this_tl = tl; + this_tl = tl; } tl = tl->next; } while (tl != stm_all_thread_locals); - if (fork_this_tl == NULL) + if (this_tl == NULL) stm_fatalerror("fork(): found no stm_thread_local_t from this thread"); s_mutex_unlock(); - /* Run a commit without releasing the mutex at the end; if necessary, - actually start a dummy inevitable transaction for this - */ - fork_was_in_transaction = _stm_in_transaction(fork_this_tl); - if (!fork_was_in_transaction) - stm_start_inevitable_transaction(fork_this_tl); - _stm_commit_transaction(/*keep_the_lock_at_the_end =*/ 1); + bool was_in_transaction = _stm_in_transaction(this_tl); + if (was_in_transaction) { + stm_become_inevitable("fork"); + /* Note that the line above can still fail and abort, which should + be fine */ + } + else { + stm_start_inevitable_transaction(this_tl); + } - printf("fork_was_in_transaction: %d\n" - "fork_this_tl->associated_segment_num: %d\n", - (int)fork_was_in_transaction, - (int)fork_this_tl->associated_segment_num); - - /* Note that the commit can still fail and abort, which should be fine */ - + s_mutex_lock(); + synchronize_all_threads(); mutex_pages_lock(); /* Make a new mmap at some other address, but of the same size as @@ -79,7 +74,7 @@ } /* Copy all the data from the two ranges of objects (large, small) - into the new mmap --- but only the shared objects + into the new mmap */ uintptr_t pagenum, endpagenum; pagenum = END_NURSERY_PAGE; /* starts after the nursery */ @@ -100,15 +95,32 @@ endpagenum = NB_PAGES; } - pagecopy(big_copy + pagenum * 4096UL, - stm_object_pages + pagenum * 4096UL); + char *src = stm_object_pages + pagenum * 4096UL; + char *dst = big_copy + pagenum * 4096UL; + pagecopy(dst, src); + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps.by_segment != 0) { + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + src += NB_PAGES * 4096UL; + dst += NB_PAGES * 4096UL; + if (ps.by_segment & (1 << j)) { + pagecopy(dst, src); + } + } + } pagenum++; } assert(fork_big_copy == NULL); fork_big_copy = big_copy; + fork_this_tl = this_tl; + fork_was_in_transaction = was_in_transaction; assert(_has_mutex()); + printf("forksupport_prepare: from %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0]); } static void forksupport_parent(void) @@ -116,8 +128,10 @@ if (stm_object_pages == NULL) return; + printf("forksupport_parent: continuing to run %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0]); + assert(_has_mutex()); assert(_is_tl_registered(fork_this_tl)); - assert(_has_mutex()); /* In the parent, after fork(), we can simply forget about the big copy that we made for the child. 
@@ -125,22 +139,33 @@ assert(fork_big_copy != NULL); munmap(fork_big_copy, TOTAL_MEMORY); fork_big_copy = NULL; + bool was_in_transaction = fork_was_in_transaction; + + mutex_pages_unlock(); + s_mutex_unlock(); + + if (!was_in_transaction) { + stm_commit_transaction(); + } dprintf(("forksupport_parent: continuing to run\n")); +} - mutex_pages_unlock(); +static void fork_abort_thread(long i) +{ + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + dprintf(("forksupport_child: abort in seg%ld\n", i)); + assert(pr->pub.running_thread->associated_segment_num == i); + assert(pr->transaction_state == TS_REGULAR); + set_gs_register(get_segment_base(i)); - printf("AFTER: fork_was_in_transaction: %d\n" - "fork_this_tl->associated_segment_num: %d\n", - (int)fork_was_in_transaction, - (int)fork_this_tl->associated_segment_num); - - if (fork_was_in_transaction) { - _stm_start_transaction(fork_this_tl, NULL, - /*already_got_the_lock =*/ 1); - } - else { - s_mutex_unlock(); + stm_jmpbuf_t jmpbuf; + if (__builtin_setjmp(jmpbuf) == 0) { + pr->pub.jmpbuf_ptr = &jmpbuf; +#ifndef NDEBUG + pr->running_pthread = pthread_self(); +#endif + stm_abort_transaction(); } } @@ -148,7 +173,6 @@ { if (stm_object_pages == NULL) return; - abort(); /* this new process contains no other thread, so we can just release these locks early */ @@ -170,7 +194,6 @@ /* Unregister all other stm_thread_local_t, mostly as a way to free the memory used by the shadowstacks */ - assert(fork_this_tl != NULL); while (stm_all_thread_locals->next != stm_all_thread_locals) { if (stm_all_thread_locals == fork_this_tl) stm_unregister_thread_local(stm_all_thread_locals->next); @@ -179,37 +202,58 @@ } assert(stm_all_thread_locals == fork_this_tl); - /* Restore a few things: the new pthread_self(), and the %gs - register (although I suppose it should be preserved by fork()) - */ - *_get_cpth(fork_this_tl) = pthread_self(); - set_gs_register(get_segment_base(fork_this_tl->associated_segment_num)); - - /* Call a subset of stm_teardown() / stm_setup() to free and - recreate the necessary data in all segments, and to clean up some - of the global data like the big arrays that don't make sense any - more. We keep other things like the smallmalloc and largemalloc - internal state. - */ - do_or_redo_teardown_after_fork(); - do_or_redo_setup_after_fork(); - /* Make all pages shared again. 
*/ - mutex_pages_lock(); - uintptr_t start = END_NURSERY_PAGE; - uintptr_t stop = (uninitialized_page_start - stm_object_pages) / 4096UL; - pages_initialize_shared(start, stop - start); - start = (uninitialized_page_stop - stm_object_pages) / 4096UL; - stop = NB_PAGES; - pages_initialize_shared(start, stop - start); - mutex_pages_unlock(); + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; - /* Now restart the transaction if needed + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (endpagenum == NB_PAGES) + break; /* done */ + } + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + if (!(ps.by_segment & (1 << j))) { + _page_do_reshare(j + 1, pagenum); + } + } + pagenum++; + } + + /* Force the interruption of other running segments */ - if (fork_was_in_transaction) - stm_start_inevitable_transaction(fork_this_tl); + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + if (pr->pub.running_thread != NULL && + pr->pub.running_thread != fork_this_tl) { + fork_abort_thread(i); + } + } + /* Restore a few things: the new pthread_self(), and the %gs + register */ + int segnum = fork_this_tl->associated_segment_num; + assert(1 <= segnum && segnum <= NB_SEGMENTS); + *_get_cpth(fork_this_tl) = pthread_self(); + set_gs_register(get_segment_base(segnum)); + assert(STM_SEGMENT->segment_num == segnum); + + if (!fork_was_in_transaction) { + stm_commit_transaction(); + } + + /* Done */ dprintf(("forksupport_child: running one thread now\n")); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -146,6 +146,13 @@ mutex_pages_unlock(); } +static void _page_do_reshare(long segnum, uintptr_t pagenum) +{ + char *segment_base = get_segment_base(segnum); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); +} + static void page_reshare(uintptr_t pagenum) { struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -38,6 +38,7 @@ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); static void page_reshare(uintptr_t pagenum); +static void _page_do_reshare(long segnum, uintptr_t pagenum); /* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ static void mutex_pages_lock(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -69,7 +69,7 @@ /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; - long creating_pthread[4]; + void *creating_pthread[2]; } stm_thread_local_t; /* this should use llvm's coldcc calling convention, From noreply at buildbot.pypy.org Wed Mar 19 08:59:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 08:59:40 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: Found out I was missing copying this Message-ID: <20140319075940.781271C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1074:d4ff527cda67 Date: 2014-03-19 
08:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/d4ff527cda67/ Log: Found out I was missing copying this diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -15,6 +15,17 @@ static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ +static bool page_is_null(char *p) +{ + long *q = (long *)p; + long i; + for (i = 0; i < 4096 / sizeof(long); i++) + if (q[i] != 0) + return false; + return true; +} + + static void forksupport_prepare(void) { if (stm_object_pages == NULL) @@ -64,13 +75,25 @@ */ char *big_copy = setup_mmap("stmgc's fork support"); - /* Copy each of the segment infos into the new mmap + /* Copy each of the segment infos into the new mmap, nurseries, + and associated read markers */ long i; for (i = 1; i <= NB_SEGMENTS; i++) { - struct stm_priv_segment_info_s *src = get_priv_segment(i); - char *dst = big_copy + (((char *)src) - stm_object_pages); - *(struct stm_priv_segment_info_s *)dst = *src; + char *src, *dst; + struct stm_priv_segment_info_s *psrc = get_priv_segment(i); + dst = big_copy + (((char *)psrc) - stm_object_pages); + *(struct stm_priv_segment_info_s *)dst = *psrc; + + src = get_segment_base(i) + FIRST_READMARKER_PAGE * 4096UL; + dst = big_copy + (src - stm_object_pages); + long j; + for (j = 0; j < END_NURSERY_PAGE - FIRST_READMARKER_PAGE; j++) { + if (!page_is_null(src)) + pagecopy(dst, src); + src += 4096; + dst += 4096; + } } /* Copy all the data from the two ranges of objects (large, small) From noreply at buildbot.pypy.org Wed Mar 19 09:10:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 09:10:43 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: Seems to work now; clean up intermediate attempts Message-ID: <20140319081043.539B01C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1075:75893b92af4e Date: 2014-03-19 09:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/75893b92af4e/ Log: Seems to work now; clean up intermediate attempts diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -10,11 +10,11 @@ #include "stmgc.h" #define NUMTHREADS 3 -#define STEPS_PER_THREAD 300 -#define THREAD_STARTS 300 // how many restarts of threads +#define STEPS_PER_THREAD 500 +#define THREAD_STARTS 1000 // how many restarts of threads #define PREBUILT_ROOTS 3 #define MAXROOTS 1000 -#define FORKS 1 +#define FORKS 3 // SUPPORT struct node_s; diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -164,13 +164,11 @@ STM_SEGMENT->transaction_read_version = 1; } -void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf, - int already_got_the_lock) +void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { - if (!already_got_the_lock) { - assert(!_stm_in_transaction(tl)); - s_mutex_lock(); - } + assert(!_stm_in_transaction(tl)); + + s_mutex_lock(); retry: if (jmpbuf == NULL) { @@ -449,7 +447,7 @@ /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ } -void _stm_commit_transaction(int keep_the_lock_at_the_end) +void stm_commit_transaction(void) { assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); @@ -508,8 +506,7 @@ _finish_transaction(); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ - if (!keep_the_lock_at_the_end) - s_mutex_unlock(); + s_mutex_unlock(); } void stm_abort_transaction(void) diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -25,10 +25,6 @@ static void teardown_pages(void) { memset(&pages_ctl, 0, sizeof(pages_ctl)); -} - -static void teardown_pages_1(void) -{ memset(pages_privatized, 0, sizeof(pages_privatized)); } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -14,8 +14,25 @@ return result; } -static void do_or_redo_setup_after_fork(void) +void stm_setup(void) { + /* Check that some values are acceptable */ + assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); + assert(4096 <= ((uintptr_t)STM_SEGMENT)); + assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); + assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); + assert(2 <= FIRST_READMARKER_PAGE); + assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); + assert(READMARKER_START < READMARKER_END); + assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); + assert(FIRST_OBJECT_PAGE < NB_PAGES); + assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); + assert((END_NURSERY_PAGE * 4096UL) >> 8 <= + (FIRST_READMARKER_PAGE * 4096UL)); + assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); + + stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); + /* The segment 0 is not used to run transactions, but contains the shared copy of the pages. We mprotect all pages before so that accesses fail, up to and including the pages corresponding to the @@ -68,43 +85,25 @@ so a null read marker means "not read" whatever the current transaction_read_version is. */ - setup_nursery(); -} - -void stm_setup(void) -{ - /* Check that some values are acceptable */ - assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); - assert(4096 <= ((uintptr_t)STM_SEGMENT)); - assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); - assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); - assert(2 <= FIRST_READMARKER_PAGE); - assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); - assert(READMARKER_START < READMARKER_END); - assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); - assert(FIRST_OBJECT_PAGE < NB_PAGES); - assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); - assert((END_NURSERY_PAGE * 4096UL) >> 8 <= - (FIRST_READMARKER_PAGE * 4096UL)); - assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - - stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); - - do_or_redo_setup_after_fork(); setup_sync(); + setup_nursery(); setup_gcpage(); setup_pages(); setup_forksupport(); } -static void do_or_redo_teardown_after_fork(void) +void stm_teardown(void) { + /* This function is called during testing, but normal programs don't + need to call it. */ + assert(!_has_mutex()); + long i; for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); - LIST_FREE(pr->objects_pointing_to_nursery); - LIST_FREE(pr->large_overflow_objects); + assert(pr->objects_pointing_to_nursery == NULL); + assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); list_free(pr->young_weakrefs); list_free(pr->old_weakrefs); @@ -113,22 +112,10 @@ tree_free(pr->callbacks_on_abort); } - teardown_core(); - teardown_sync_1(); - teardown_pages_1(); -} - -void stm_teardown(void) -{ - /* This function is called during testing, but normal programs don't - need to call it. 
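The comment above describes keeping most of a large mapping inaccessible so that stray accesses fault immediately. In isolation, the general mmap()/mprotect() pattern for that looks like the sketch below (reserve first, then selectively enable); this is not the setup_mmap() code, the sizes are invented, and the real setup may well apply the protections in the opposite order (map read-write, then revoke access on the ranges that must fault):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t page = 4096, total = 16 * page;

        /* Reserve a contiguous address range with no access rights. */
        char *base = mmap(NULL, total, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED) { perror("mmap"); return 1; }

        /* Enable only the pages that are actually meant to be used. */
        if (mprotect(base + page, 4 * page, PROT_READ | PROT_WRITE) != 0) {
            perror("mprotect");
            return 1;
        }

        strcpy(base + page, "accessible");
        printf("%s\n", base + page);
        /* touching base[0] here would fault, which is the point */
        return 0;
    }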
*/ - assert(!_has_mutex()); - - do_or_redo_teardown_after_fork(); - munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; + teardown_core(); teardown_sync(); teardown_gcpage(); teardown_pages(); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -36,10 +36,6 @@ char reserved[192]; } sync_ctl __attribute__((aligned(64))); -#ifndef NDEBUG -static bool _safe_points_requested = false; -#endif - static void setup_sync(void) { @@ -63,15 +59,8 @@ if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) stm_fatalerror("cond destroy: %m"); } -} -static void teardown_sync_1(void) -{ memset(&sync_ctl, 0, sizeof(sync_ctl)); -#ifndef NDEBUG - _safe_points_requested = false; -#endif - pause_signalled = false; } #ifndef NDEBUG @@ -263,6 +252,10 @@ /************************************************************/ +#ifndef NDEBUG +static bool _safe_points_requested = false; +#endif + static void signal_everybody_to_pause_running(void) { assert(_safe_points_requested == false); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -78,8 +78,7 @@ object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); -void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *, int); -void _stm_commit_transaction(int); +void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); void _stm_collectable_safe_point(void); /* for tests, but also used in duhton: */ @@ -258,19 +257,17 @@ stm_jmpbuf_t). */ #define STM_START_TRANSACTION(tl, jmpbuf) ({ \ while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } \ - _stm_start_transaction(tl, &jmpbuf, 0); \ + _stm_start_transaction(tl, &jmpbuf); \ }) /* Start an inevitable transaction, if it's going to return from the current function immediately. */ static inline void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - _stm_start_transaction(tl, NULL, 0); + _stm_start_transaction(tl, NULL); } /* Commit a transaction. */ -static inline void stm_commit_transaction(void) { - _stm_commit_transaction(0); -} +void stm_commit_transaction(void); /* Abort the currently running transaction. 
*/ void stm_abort_transaction(void) __attribute__((noreturn)); From noreply at buildbot.pypy.org Wed Mar 19 09:17:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 09:17:22 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: Re-hide these printfs Message-ID: <20140319081722.901911C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1076:62a1be4e540f Date: 2014-03-19 09:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/62a1be4e540f/ Log: Re-hide these printfs diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -142,8 +142,8 @@ fork_was_in_transaction = was_in_transaction; assert(_has_mutex()); - printf("forksupport_prepare: from %p %p\n", fork_this_tl, - fork_this_tl->creating_pthread[0]); + dprintf(("forksupport_prepare: from %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); } static void forksupport_parent(void) @@ -151,8 +151,8 @@ if (stm_object_pages == NULL) return; - printf("forksupport_parent: continuing to run %p %p\n", fork_this_tl, - fork_this_tl->creating_pthread[0]); + dprintf(("forksupport_parent: continuing to run %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); assert(_has_mutex()); assert(_is_tl_registered(fork_this_tl)); From noreply at buildbot.pypy.org Wed Mar 19 09:23:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 09:23:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/75893b92af4e (c7-fork) Message-ID: <20140319082320.3357C1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70085:22b83e351878 Date: 2014-03-19 09:22 +0100 http://bitbucket.org/pypy/pypy/changeset/22b83e351878/ Log: import stmgc/75893b92af4e (c7-fork) diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -885ed3b0f6ee +75893b92af4e diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -158,7 +158,7 @@ MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { /* fall-back */ #if STM_TESTS - stm_fatalerror("reset_transaction_read_version: %m\n"); + stm_fatalerror("reset_transaction_read_version: %m"); #endif memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); } @@ -273,7 +273,7 @@ assert(_has_mutex_pages()); assert(!_is_young(obj)); - char *segment_base = get_segment(source_segment_num)->segment_base; + char *segment_base = get_segment_base(source_segment_num); uintptr_t start = (uintptr_t)obj; uintptr_t first_page = start / 4096UL; struct object_s *realobj = (struct object_s *) @@ -519,19 +519,17 @@ static void reset_modified_from_other_segments(int segment_num) { - /* pull the right versions from other threads in order + /* pull the right versions from segment 0 in order to reset our pages as part of an abort. Note that this function is also sometimes called from contention.c to clean up the state of a different thread, when we would really like it to be aborted now and it is suspended at a safe-point. 
- */ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); - long remote_num = !segment_num; char *local_base = get_segment_base(segment_num); - char *remote_base = get_segment_base(remote_num); + char *remote_base = get_segment_base(0); LIST_FOREACH_R( pseg->modified_old_objects, diff --git a/rpython/translator/stm/src_stm/stm/fprintcolor.c b/rpython/translator/stm/src_stm/stm/fprintcolor.c --- a/rpython/translator/stm/src_stm/stm/fprintcolor.c +++ b/rpython/translator/stm/src_stm/stm/fprintcolor.c @@ -9,8 +9,8 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm[%lx] ", dprintfcolor(), - (long)pthread_self()); + int size = (int)sprintf(buffer, "\033[%dm[%d,%lx] ", dprintfcolor(), + (int)getpid(), (long)pthread_self()); assert(size >= 0); va_start(ap, format); @@ -42,6 +42,7 @@ va_start(ap, format); vfprintf(stderr, format, ap); + fprintf(stderr, "\n"); va_end(ap); abort(); diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -61,7 +61,7 @@ return; out_of_memory: - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } static char *_allocate_small_slowpath(uint64_t size) @@ -86,7 +86,7 @@ /* Allocate the object with largemalloc.c from the lower addresses. */ char *addr = _stm_large_malloc(size); if (addr == NULL) - stm_fatalerror("not enough memory!\n"); + stm_fatalerror("not enough memory!"); if (addr + size > uninitialized_page_start) { uintptr_t npages; @@ -94,7 +94,7 @@ npages += GCPAGE_NUM_PAGES; if (uninitialized_page_stop - uninitialized_page_start < npages * 4096UL) { - stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } setup_N_pages(uninitialized_page_start, npages); uninitialized_page_start += npages * 4096UL; diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c --- a/rpython/translator/stm/src_stm/stm/largemalloc.c +++ b/rpython/translator/stm/src_stm/stm/largemalloc.c @@ -274,8 +274,10 @@ /* unlink the following chunk */ mscan->d.next->prev = mscan->d.prev; mscan->d.prev->next = mscan->d.next; - assert((mscan->prev_size = (size_t)-258, 1)); /* 0xfffffffffffffefe */ - assert((mscan->size = (size_t)-515, 1)); /* 0xfffffffffffffdfd */ +#ifndef NDEBUG + mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ + mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ +#endif /* merge the two chunks */ assert(fsize == fscan->prev_size); diff --git a/rpython/translator/stm/src_stm/stm/list.c b/rpython/translator/stm/src_stm/stm/list.c --- a/rpython/translator/stm/src_stm/stm/list.c +++ b/rpython/translator/stm/src_stm/stm/list.c @@ -13,7 +13,7 @@ uintptr_t initial_allocation = 32; struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); if (lst == NULL) - stm_fatalerror("out of memory in list_create\n"); /* XXX */ + stm_fatalerror("out of memory in list_create"); /* XXX */ lst->count = 0; lst->last_allocated = initial_allocation - 1; @@ -25,7 +25,7 @@ nalloc = LIST_OVERCNT(nalloc); lst = realloc(lst, LIST_SETSIZE(nalloc)); if (lst == NULL) - stm_fatalerror("out of memory in _list_grow\n"); /* XXX */ + stm_fatalerror("out of memory in _list_grow"); /* XXX */ lst->last_allocated = nalloc - 1; return lst; @@ -94,7 +94,7 @@ //fprintf(stderr, "growth: %ld\n", newalloc); char *newitems = malloc(newalloc); if (newitems == NULL) { - 
stm_fatalerror("out of memory!\n"); /* XXX */ + stm_fatalerror("out of memory!"); /* XXX */ } newtree.raw_start = newitems; newtree.raw_current = newitems; diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -33,10 +33,6 @@ } } -static void teardown_nursery(void) -{ -} - static inline bool _is_in_nursery(object_t *obj) { assert((uintptr_t)obj >= NURSERY_START); diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -94,7 +94,7 @@ int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) - stm_fatalerror("remap_file_pages: %m\n"); + stm_fatalerror("remap_file_pages: %m"); } static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) @@ -104,6 +104,8 @@ segment 0. */ uintptr_t i; assert(_has_mutex_pages()); + if (count == 0) + return; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, @@ -141,6 +143,13 @@ mutex_pages_unlock(); } +static void _page_do_reshare(long segnum, uintptr_t pagenum) +{ + char *segment_base = get_segment_base(segnum); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); +} + static void page_reshare(uintptr_t pagenum) { struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; @@ -150,7 +159,7 @@ for (j = 0; j < NB_SEGMENTS; j++) { if (ps.by_segment & (1 << j)) { /* Page 'pagenum' is private in segment 'j + 1'. Reshare */ - char *segment_base = stm_object_pages + NB_PAGES * 4096UL * (j+1); + char *segment_base = get_segment_base(j + 1); madvise(segment_base + pagenum * 4096UL, 4096, MADV_DONTNEED); d_remap_file_pages(segment_base + pagenum * 4096UL, diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -39,7 +39,9 @@ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); static void page_reshare(uintptr_t pagenum); +static void _page_do_reshare(long segnum, uintptr_t pagenum); +/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); static bool _has_mutex_pages(void) __attribute__((unused)); diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -4,6 +4,17 @@ #endif +static char *setup_mmap(char *reason) +{ + char *result = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (result == MAP_FAILED) + stm_fatalerror("%s failed: %m\n", reason); + + return result; +} + void stm_setup(void) { /* Check that some values are acceptable */ @@ -21,13 +32,9 @@ (FIRST_READMARKER_PAGE * 4096UL)); assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - stm_object_pages = mmap(NULL, TOTAL_MEMORY, - PROT_READ | PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (stm_object_pages == MAP_FAILED) - stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); + stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); - /* The segment 0 is not used to run transactions, but 
to contain the + /* The segment 0 is not used to run transactions, but contains the shared copy of the pages. We mprotect all pages before so that accesses fail, up to and including the pages corresponding to the nurseries of the other segments. */ @@ -84,6 +91,7 @@ setup_nursery(); setup_gcpage(); setup_pages(); + setup_forksupport(); } void stm_teardown(void) @@ -111,11 +119,10 @@ teardown_core(); teardown_sync(); teardown_gcpage(); - teardown_nursery(); teardown_pages(); } -void _init_shadow_stack(stm_thread_local_t *tl) +static void _init_shadow_stack(stm_thread_local_t *tl) { struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); @@ -124,13 +131,18 @@ tl->shadowstack_base = s; } -void _done_shadow_stack(stm_thread_local_t *tl) +static void _done_shadow_stack(stm_thread_local_t *tl) { free(tl->shadowstack_base); tl->shadowstack = NULL; tl->shadowstack_base = NULL; } +static pthread_t *_get_cpth(stm_thread_local_t *tl) +{ + assert(sizeof(pthread_t) <= sizeof(tl->creating_pthread)); + return (pthread_t *)(tl->creating_pthread); +} void stm_register_thread_local(stm_thread_local_t *tl) { @@ -154,6 +166,7 @@ numbers automatically. */ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); s_mutex_unlock(); @@ -162,6 +175,7 @@ void stm_unregister_thread_local(stm_thread_local_t *tl) { s_mutex_lock(); + assert(tl->prev != NULL); assert(tl->next != NULL); _done_shadow_stack(tl); if (tl == stm_all_thread_locals) { diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -41,24 +41,24 @@ static void setup_sync(void) { if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0) - stm_fatalerror("mutex initialization: %m\n"); + stm_fatalerror("mutex initialization: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0) - stm_fatalerror("cond initialization: %m\n"); + stm_fatalerror("cond initialization: %m"); } } static void teardown_sync(void) { if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0) - stm_fatalerror("mutex destroy: %m\n"); + stm_fatalerror("mutex destroy: %m"); long i; for (i = 0; i < _C_TOTAL; i++) { if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) - stm_fatalerror("cond destroy: %m\n"); + stm_fatalerror("cond destroy: %m"); } memset(&sync_ctl, 0, sizeof(sync_ctl)); @@ -75,14 +75,14 @@ static void set_gs_register(char *value) { if (UNLIKELY(syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0)) - stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n"); + stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m"); } static inline void s_mutex_lock(void) { assert(!_has_mutex_here); if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_mutex_lock: %m\n"); + stm_fatalerror("pthread_mutex_lock: %m"); assert((_has_mutex_here = true, 1)); } @@ -90,32 +90,32 @@ { assert(_has_mutex_here); if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_mutex_unlock: %m\n"); + stm_fatalerror("pthread_mutex_unlock: %m"); assert((_has_mutex_here = false, 1)); } static inline void cond_wait(enum cond_type_e ctype) { #ifdef STM_NO_COND_WAIT - stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); + stm_fatalerror("*** cond_wait/%d called!", (int)ctype); #endif 
assert(_has_mutex_here); if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], &sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_wait/%d: %m", (int)ctype); } static inline void cond_signal(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_signal/%d: %m", (int)ctype); } static inline void cond_broadcast(enum cond_type_e ctype) { if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_broadcast/%d: %m", (int)ctype); } /************************************************************/ diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -24,6 +24,7 @@ #include "stm/largemalloc.c" #include "stm/nursery.c" #include "stm/sync.c" +#include "stm/forksupport.c" #include "stm/setup.c" #include "stm/hash_id.c" #include "stm/core.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -70,6 +70,7 @@ /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; + void *creating_pthread[2]; } stm_thread_local_t; /* this should use llvm's coldcc calling convention, @@ -130,8 +131,10 @@ /* ==================== PUBLIC API ==================== */ -/* Number of segments (i.e. how many threads can be executed in - parallel, in maximum). +/* Number of segments (i.e. how many transactions can be executed in + parallel, in maximum). If you try to start transactions in more + threads than the number of segments, it will block, waiting for the + next segment to become free. */ #define STM_NB_SEGMENTS 4 From noreply at buildbot.pypy.org Wed Mar 19 09:23:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 09:23:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Convenience Message-ID: <20140319082321.E3CB21C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70086:9b2f37131a9b Date: 2014-03-19 09:22 +0100 http://bitbucket.org/pypy/pypy/changeset/9b2f37131a9b/ Log: Convenience diff --git a/rpython/translator/stm/import_stmgc.py b/rpython/translator/stm/import_stmgc.py --- a/rpython/translator/stm/import_stmgc.py +++ b/rpython/translator/stm/import_stmgc.py @@ -37,6 +37,7 @@ path.chmod(0444) # stmgc_dest.join('revision').write('%s\n' % rev) + print rev if __name__ == '__main__': if len(sys.argv) != 2: From noreply at buildbot.pypy.org Wed Mar 19 10:14:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 10:14:55 +0100 (CET) Subject: [pypy-commit] cffi default: (Lisandro Dalcin, part of issue 141) Message-ID: <20140319091455.61EF91D2511@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1482:56f591248800 Date: 2014-03-19 10:14 +0100 http://bitbucket.org/cffi/cffi/changeset/56f591248800/ Log: (Lisandro Dalcin, part of issue 141) Check for "__thread" by asking distutils specifically for the same compiler as the one it will use later. 
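For context, the one-line probe that both the old and the new version of this check compile is plain GCC/Clang C: the '__thread' storage class asks for one copy of the variable per thread. A small stand-alone sketch of what that buys (illustrative demo code, not part of the patch; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static __thread int counter = 0;          /* one instance per thread */

static void *worker(void *arg)
{
    counter += (int)(intptr_t)arg;        /* touches only this thread's copy */
    printf("worker %ld sees counter = %d\n", (long)(intptr_t)arg, counter);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, worker, (void *)1);
    pthread_create(&t2, NULL, worker, (void *)2);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("main still sees counter = %d\n", counter);   /* prints 0 */
    return 0;
}

If the compiler that distutils will actually use rejects the probe, the patch below simply leaves USE__THREAD undefined and prints a note, instead of failing the build.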
diff --git a/c/check__thread.c b/c/check__thread.c deleted file mode 100644 --- a/c/check__thread.c +++ /dev/null @@ -1,1 +0,0 @@ -__thread int some_threadlocal_variable_42; diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,25 +42,14 @@ resultlist[:] = res def ask_supports_thread(): - if sys.platform == "darwin": - sys.stderr.write("Note: will not use '__thread' in the C code\n") - sys.stderr.write("This is for OS/X-specific reasons: confusion " - "between 'cc' versus 'gcc' (see issue 123)\n") - return - import distutils.errors - from distutils.ccompiler import new_compiler - compiler = new_compiler(force=1) - try: - compiler.compile(['c/check__thread.c']) - except distutils.errors.CompileError: + from distutils.core import Distribution + config = Distribution().get_command_obj('config') + ok = config.try_compile('__thread int some_threadlocal_variable_42;') + if ok: + define_macros.append(('USE__THREAD', None)) + else: sys.stderr.write("Note: will not use '__thread' in the C code\n") sys.stderr.write("The above error message can be safely ignored\n") - else: - define_macros.append(('USE__THREAD', None)) - try: - os.unlink('c/check__thread.o') - except OSError: - pass def use_pkg_config(): _ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True) From noreply at buildbot.pypy.org Wed Mar 19 10:21:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 10:21:09 +0100 (CET) Subject: [pypy-commit] stmgc c7-fork: Close branch ready for merging Message-ID: <20140319092109.7DC7A1D2511@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-fork Changeset: r1077:ede4b4739ad4 Date: 2014-03-19 10:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/ede4b4739ad4/ Log: Close branch ready for merging From noreply at buildbot.pypy.org Wed Mar 19 10:21:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 10:21:11 +0100 (CET) Subject: [pypy-commit] stmgc default: hg merge c7-fork Message-ID: <20140319092111.0F49F1D2511@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1078:0e7ff5304cac Date: 2014-03-19 10:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/0e7ff5304cac/ Log: hg merge c7-fork Add pthread_atfork() to manually de-share the mapping, mounted with MAP_SHARED. 
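The log message is terse, so a word on why the mapping has to be de-shared by hand: fork() gives the child copy-on-write copies of MAP_PRIVATE memory, but a MAP_SHARED mapping stays the very same memory in parent and child, so a write on either side is immediately visible to the other. A minimal Linux sketch of that behaviour (generic demo code, not taken from the merge below):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    /* anonymous MAP_SHARED page, like stmgc's big object mapping */
    int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    *p = 1;
    pid_t child = fork();
    if (child == 0) {
        *p = 42;                      /* child writes into the shared page */
        _exit(0);
    }
    waitpid(child, NULL, 0);
    printf("parent sees %d\n", *p);   /* prints 42, not 1 */
    return 0;
}

This is why the atfork handlers merged below build a second copy of the whole region in the prepare hook and install it in the child, instead of relying on the kernel's copy-on-write.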
diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include "stmgc.h" @@ -12,12 +14,14 @@ #define THREAD_STARTS 1000 // how many restarts of threads #define PREBUILT_ROOTS 3 #define MAXROOTS 1000 +#define FORKS 3 // SUPPORT struct node_s; typedef TLPREFIX struct node_s node_t; typedef node_t* nodeptr_t; typedef object_t* objptr_t; +int num_forked_children = 0; struct node_s { struct object_s hdr; @@ -335,19 +339,39 @@ if (p == (objptr_t)-1) { push_roots(); - stm_commit_transaction(); - td.num_roots_at_transaction_start = td.num_roots; + if (arg == NULL) { /* common case */ + stm_commit_transaction(); + td.num_roots_at_transaction_start = td.num_roots; + if (get_rand(100) < 98) { + STM_START_TRANSACTION(&stm_thread_local, here); + } else { + stm_start_inevitable_transaction(&stm_thread_local); + } + td.num_roots = td.num_roots_at_transaction_start; + p = NULL; + pop_roots(); + reload_roots(); + } + else { + /* run a fork() inside the transaction */ + printf("========== FORK =========\n"); + arg = NULL; + pid_t child = fork(); + printf("=== in process %d thread %lx, fork() returned %d\n", + (int)getpid(), (long)pthread_self(), (int)child); + if (child == -1) { + fprintf(stderr, "fork() error: %m\n"); + abort(); + } + if (child != 0) + num_forked_children++; + else + num_forked_children = 0; - if (get_rand(100) < 98) { - STM_START_TRANSACTION(&stm_thread_local, here); - } else { - stm_start_inevitable_transaction(&stm_thread_local); + pop_roots(); + p = NULL; } - td.num_roots = td.num_roots_at_transaction_start; - p = NULL; - pop_roots(); - reload_roots(); } } stm_commit_transaction(); @@ -427,8 +451,24 @@ assert(status == 0); printf("thread finished\n"); if (thread_starts) { + long forkbase = NUMTHREADS * THREAD_STARTS / (FORKS + 1); + long _fork = (thread_starts % forkbase) == 0; thread_starts--; - newthread(demo_random, NULL); + newthread(demo_random, (void *)_fork); + } + } + + for (i = 0; i < num_forked_children; i++) { + pid_t child = wait(&status); + if (child == -1) + perror("wait"); + printf("From %d: child %d terminated with exit status %d\n", + (int)getpid(), (int)child, status); + if (WIFEXITED(status) && WEXITSTATUS(status) == 0) + ; + else { + printf("*** error from the child ***\n"); + return 1; } } diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c new file mode 100644 --- /dev/null +++ b/c7/stm/forksupport.c @@ -0,0 +1,295 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +/* XXX this is currently not doing copy-on-write, but simply forces a + copy of all pages as soon as fork() is called. */ + + +static char *fork_big_copy = NULL; +static stm_thread_local_t *fork_this_tl; +static bool fork_was_in_transaction; + +static char *setup_mmap(char *reason); /* forward, in setup.c */ +static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ + + +static bool page_is_null(char *p) +{ + long *q = (long *)p; + long i; + for (i = 0; i < 4096 / sizeof(long); i++) + if (q[i] != 0) + return false; + return true; +} + + +static void forksupport_prepare(void) +{ + if (stm_object_pages == NULL) + return; + + /* So far we attempt to check this by walking all stm_thread_local_t, + marking the one from the current thread, and verifying that it's not + running a transaction. This assumes that the stm_thread_local_t is just + a __thread variable, so never changes threads. 
+ */ + s_mutex_lock(); + + dprintf(("forksupport_prepare\n")); + + stm_thread_local_t *this_tl = NULL; + stm_thread_local_t *tl = stm_all_thread_locals; + do { + if (pthread_equal(*_get_cpth(tl), pthread_self())) { + if (this_tl != NULL) + stm_fatalerror("fork(): found several stm_thread_local_t" + " from the same thread"); + this_tl = tl; + } + tl = tl->next; + } while (tl != stm_all_thread_locals); + + if (this_tl == NULL) + stm_fatalerror("fork(): found no stm_thread_local_t from this thread"); + s_mutex_unlock(); + + bool was_in_transaction = _stm_in_transaction(this_tl); + if (was_in_transaction) { + stm_become_inevitable("fork"); + /* Note that the line above can still fail and abort, which should + be fine */ + } + else { + stm_start_inevitable_transaction(this_tl); + } + + s_mutex_lock(); + synchronize_all_threads(); + mutex_pages_lock(); + + /* Make a new mmap at some other address, but of the same size as + the standard mmap at stm_object_pages + */ + char *big_copy = setup_mmap("stmgc's fork support"); + + /* Copy each of the segment infos into the new mmap, nurseries, + and associated read markers + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *src, *dst; + struct stm_priv_segment_info_s *psrc = get_priv_segment(i); + dst = big_copy + (((char *)psrc) - stm_object_pages); + *(struct stm_priv_segment_info_s *)dst = *psrc; + + src = get_segment_base(i) + FIRST_READMARKER_PAGE * 4096UL; + dst = big_copy + (src - stm_object_pages); + long j; + for (j = 0; j < END_NURSERY_PAGE - FIRST_READMARKER_PAGE; j++) { + if (!page_is_null(src)) + pagecopy(dst, src); + src += 4096; + dst += 4096; + } + } + + /* Copy all the data from the two ranges of objects (large, small) + into the new mmap + */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + if (endpagenum < NB_PAGES) + endpagenum++; /* the next page too, because it might contain + data from largemalloc */ + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + pagenum--; /* the prev page too, because it does contain + data from largemalloc */ + endpagenum = NB_PAGES; + } + + char *src = stm_object_pages + pagenum * 4096UL; + char *dst = big_copy + pagenum * 4096UL; + pagecopy(dst, src); + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps.by_segment != 0) { + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + src += NB_PAGES * 4096UL; + dst += NB_PAGES * 4096UL; + if (ps.by_segment & (1 << j)) { + pagecopy(dst, src); + } + } + } + pagenum++; + } + + assert(fork_big_copy == NULL); + fork_big_copy = big_copy; + fork_this_tl = this_tl; + fork_was_in_transaction = was_in_transaction; + + assert(_has_mutex()); + dprintf(("forksupport_prepare: from %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); +} + +static void forksupport_parent(void) +{ + if (stm_object_pages == NULL) + return; + + dprintf(("forksupport_parent: continuing to run %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); + assert(_has_mutex()); + assert(_is_tl_registered(fork_this_tl)); + + /* In the parent, after fork(), we can simply forget about the big copy + that we made for the child. 
+ */ + assert(fork_big_copy != NULL); + munmap(fork_big_copy, TOTAL_MEMORY); + fork_big_copy = NULL; + bool was_in_transaction = fork_was_in_transaction; + + mutex_pages_unlock(); + s_mutex_unlock(); + + if (!was_in_transaction) { + stm_commit_transaction(); + } + + dprintf(("forksupport_parent: continuing to run\n")); +} + +static void fork_abort_thread(long i) +{ + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + dprintf(("forksupport_child: abort in seg%ld\n", i)); + assert(pr->pub.running_thread->associated_segment_num == i); + assert(pr->transaction_state == TS_REGULAR); + set_gs_register(get_segment_base(i)); + + stm_jmpbuf_t jmpbuf; + if (__builtin_setjmp(jmpbuf) == 0) { + pr->pub.jmpbuf_ptr = &jmpbuf; +#ifndef NDEBUG + pr->running_pthread = pthread_self(); +#endif + stm_abort_transaction(); + } +} + +static void forksupport_child(void) +{ + if (stm_object_pages == NULL) + return; + + /* this new process contains no other thread, so we can + just release these locks early */ + mutex_pages_unlock(); + s_mutex_unlock(); + + /* Move the copy of the mmap over the old one, overwriting it + and thus freeing the old mapping in this process + */ + assert(fork_big_copy != NULL); + assert(stm_object_pages != NULL); + void *res = mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY, + MREMAP_MAYMOVE | MREMAP_FIXED, + stm_object_pages); + if (res != stm_object_pages) + stm_fatalerror("after fork: mremap failed: %m"); + fork_big_copy = NULL; + + /* Unregister all other stm_thread_local_t, mostly as a way to free + the memory used by the shadowstacks + */ + while (stm_all_thread_locals->next != stm_all_thread_locals) { + if (stm_all_thread_locals == fork_this_tl) + stm_unregister_thread_local(stm_all_thread_locals->next); + else + stm_unregister_thread_local(stm_all_thread_locals); + } + assert(stm_all_thread_locals == fork_this_tl); + + /* Make all pages shared again. 
+ */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (endpagenum == NB_PAGES) + break; /* done */ + } + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + if (!(ps.by_segment & (1 << j))) { + _page_do_reshare(j + 1, pagenum); + } + } + pagenum++; + } + + /* Force the interruption of other running segments + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + if (pr->pub.running_thread != NULL && + pr->pub.running_thread != fork_this_tl) { + fork_abort_thread(i); + } + } + + /* Restore a few things: the new pthread_self(), and the %gs + register */ + int segnum = fork_this_tl->associated_segment_num; + assert(1 <= segnum && segnum <= NB_SEGMENTS); + *_get_cpth(fork_this_tl) = pthread_self(); + set_gs_register(get_segment_base(segnum)); + assert(STM_SEGMENT->segment_num == segnum); + + if (!fork_was_in_transaction) { + stm_commit_transaction(); + } + + /* Done */ + dprintf(("forksupport_child: running one thread now\n")); +} + + +static void setup_forksupport(void) +{ + static bool fork_support_ready = false; + + if (!fork_support_ready) { + int res = pthread_atfork(forksupport_prepare, forksupport_parent, + forksupport_child); + if (res != 0) + stm_fatalerror("pthread_atfork() failed: %m"); + fork_support_ready = true; + } +} diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ b/c7/stm/fprintcolor.c @@ -8,8 +8,8 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm[%lx] ", dprintfcolor(), - (long)pthread_self()); + int size = (int)sprintf(buffer, "\033[%dm[%d,%lx] ", dprintfcolor(), + (int)getpid(), (long)pthread_self()); assert(size >= 0); va_start(ap, format); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -273,8 +273,10 @@ /* unlink the following chunk */ mscan->d.next->prev = mscan->d.prev; mscan->d.prev->next = mscan->d.next; - assert((mscan->prev_size = (size_t)-258, 1)); /* 0xfffffffffffffefe */ - assert((mscan->size = (size_t)-515, 1)); /* 0xfffffffffffffdfd */ +#ifndef NDEBUG + mscan->prev_size = (size_t)-258; /* 0xfffffffffffffefe */ + mscan->size = (size_t)-515; /* 0xfffffffffffffdfd */ +#endif /* merge the two chunks */ assert(fsize == fscan->prev_size); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -32,10 +32,6 @@ } } -static void teardown_nursery(void) -{ -} - static inline bool _is_in_nursery(object_t *obj) { assert((uintptr_t)obj >= NURSERY_START); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -103,6 +103,8 @@ segment 0. 
*/ uintptr_t i; assert(_has_mutex_pages()); + if (count == 0) + return; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); d_remap_file_pages(segment_base + pagenum * 4096UL, @@ -140,6 +142,13 @@ mutex_pages_unlock(); } +static void _page_do_reshare(long segnum, uintptr_t pagenum) +{ + char *segment_base = get_segment_base(segnum); + d_remap_file_pages(segment_base + pagenum * 4096UL, + 4096, pagenum); +} + static void page_reshare(uintptr_t pagenum) { struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -38,7 +38,9 @@ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); static void page_reshare(uintptr_t pagenum); +static void _page_do_reshare(long segnum, uintptr_t pagenum); +/* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); static bool _has_mutex_pages(void) __attribute__((unused)); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -3,6 +3,17 @@ #endif +static char *setup_mmap(char *reason) +{ + char *result = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (result == MAP_FAILED) + stm_fatalerror("%s failed: %m\n", reason); + + return result; +} + void stm_setup(void) { /* Check that some values are acceptable */ @@ -20,13 +31,9 @@ (FIRST_READMARKER_PAGE * 4096UL)); assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - stm_object_pages = mmap(NULL, TOTAL_MEMORY, - PROT_READ | PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (stm_object_pages == MAP_FAILED) - stm_fatalerror("initial stm_object_pages mmap() failed: %m"); + stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); - /* The segment 0 is not used to run transactions, but to contain the + /* The segment 0 is not used to run transactions, but contains the shared copy of the pages. We mprotect all pages before so that accesses fail, up to and including the pages corresponding to the nurseries of the other segments. */ @@ -83,6 +90,7 @@ setup_nursery(); setup_gcpage(); setup_pages(); + setup_forksupport(); } void stm_teardown(void) @@ -110,11 +118,10 @@ teardown_core(); teardown_sync(); teardown_gcpage(); - teardown_nursery(); teardown_pages(); } -void _init_shadow_stack(stm_thread_local_t *tl) +static void _init_shadow_stack(stm_thread_local_t *tl) { struct stm_shadowentry_s *s = (struct stm_shadowentry_s *) malloc(SHADOW_STACK_SIZE * sizeof(struct stm_shadowentry_s)); @@ -123,13 +130,18 @@ tl->shadowstack_base = s; } -void _done_shadow_stack(stm_thread_local_t *tl) +static void _done_shadow_stack(stm_thread_local_t *tl) { free(tl->shadowstack_base); tl->shadowstack = NULL; tl->shadowstack_base = NULL; } +static pthread_t *_get_cpth(stm_thread_local_t *tl) +{ + assert(sizeof(pthread_t) <= sizeof(tl->creating_pthread)); + return (pthread_t *)(tl->creating_pthread); +} void stm_register_thread_local(stm_thread_local_t *tl) { @@ -153,6 +165,7 @@ numbers automatically. 
*/ num = (num % NB_SEGMENTS) + 1; tl->associated_segment_num = num; + *_get_cpth(tl) = pthread_self(); _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); s_mutex_unlock(); @@ -161,6 +174,7 @@ void stm_unregister_thread_local(stm_thread_local_t *tl) { s_mutex_lock(); + assert(tl->prev != NULL); assert(tl->next != NULL); _done_shadow_stack(tl); if (tl == stm_all_thread_locals) { diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -23,6 +23,7 @@ #include "stm/largemalloc.c" #include "stm/nursery.c" #include "stm/sync.c" +#include "stm/forksupport.c" #include "stm/setup.c" #include "stm/hash_id.c" #include "stm/core.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -69,6 +69,7 @@ /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; + void *creating_pthread[2]; } stm_thread_local_t; /* this should use llvm's coldcc calling convention, From noreply at buildbot.pypy.org Wed Mar 19 10:51:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 10:51:24 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: tweaks Message-ID: <20140319095124.6CDC21C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70087:9846f4e476df Date: 2014-03-19 10:50 +0100 http://bitbucket.org/pypy/pypy/changeset/9846f4e476df/ Log: tweaks diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -71,7 +71,12 @@ v_struct = hop.spaceop.args[0] assert opname in ('setfield', 'setarrayitem', 'setinteriorfield', 'raw_store') - if var_needsgc(v_struct) and hop.spaceop not in self.clean_sets: + if not var_needsgc(v_struct): + if (var_needsgc(hop.spaceop.args[-1]) and + 'is_excdata' not in hop.spaceop.args[0].concretetype.TO._hints): + raise Exception("%s: GC pointer written into a non-GC location" + % (hop.spaceop,)) + elif hop.spaceop not in self.clean_sets: if self.in_stm_ignored: # detect if we're inside a 'stm_ignored' block and in # that case don't call stm_write(). 
This only works for diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -906,6 +906,13 @@ **cur = stm_setup_prebuilt(**cur); } } + +void pypy_stm_register_thread_local(void) +{ + stm_register_thread_local(&stm_thread_local); + stm_thread_local.mem_clear_on_abort = &pypy_g_ExcData; + stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData); +} ''' def commondefs(defines): diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -464,7 +464,8 @@ ('exc_type', self.lltype_of_exception_type), ('exc_value', self.lltype_of_exception_value), hints={'stm_thread_local': True, - 'stm_dont_track_raw_accesses':True}) + 'stm_dont_track_raw_accesses':True, + 'is_excdata': True}) self.EXCDATA = EXCDATA exc_data = lltype.malloc(EXCDATA, immortal=True) diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -44,10 +44,7 @@ def stm_register_thread_local(funcgen, op): - return ( - 'stm_register_thread_local(&stm_thread_local);\n\t' - 'stm_thread_local.mem_clear_on_abort = &pypy_g_ExcData;\n\t' - 'stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData);') + return 'pypy_stm_register_thread_local();' def stm_unregister_thread_local(funcgen, op): return 'stm_unregister_thread_local(&stm_thread_local);' diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -48,7 +48,7 @@ void pypy_stm_setup(void) { stm_setup(); - stm_register_thread_local(&stm_thread_local); + pypy_stm_register_thread_local(); pypy_stm_ready_atomic = 1; pypy_stm_set_transaction_length(100); pypy_stm_start_inevitable_if_not_atomic(); @@ -59,7 +59,7 @@ if (pypy_stm_ready_atomic == 0) { /* first time we see this thread */ int e = errno; - stm_register_thread_local(&stm_thread_local); + pypy_stm_register_thread_local(); errno = e; pypy_stm_ready_atomic = 1; pypy_stm_start_inevitable_if_not_atomic(); diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -13,7 +13,8 @@ extern __thread uintptr_t pypy_stm_nursery_low_fill_mark; void pypy_stm_setup(void); -void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ +void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ +void pypy_stm_register_thread_local(void); /* generated into stm_prebuilt.c */ static inline void pypy_stm_commit_if_not_atomic(void) { int e = errno; From noreply at buildbot.pypy.org Wed Mar 19 11:06:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 11:06:15 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Oups, forgot to add this. Message-ID: <20140319100615.8CEEA1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70088:b6ee514a4d42 Date: 2014-03-19 11:05 +0100 http://bitbucket.org/pypy/pypy/changeset/b6ee514a4d42/ Log: Oups, forgot to add this. 
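The file added below is the same forksupport.c imported earlier in this log. Its child hook installs the pre-built copy with a single mremap() call rather than munmap() followed by mmap(), so the original address range is replaced in one step and at a fixed address. A reduced sketch of that pattern, assuming Linux with _GNU_SOURCE (the function name here is made up for illustration):

#define _GNU_SOURCE
#include <sys/mman.h>

/* Slide the freshly built copy over the original mapping.  With
   MREMAP_FIXED, whatever was mapped at 'original' before is unmapped
   as part of the same call, and the copy ends up at that address. */
static int replace_mapping(void *copy, void *original, size_t size)
{
    void *res = mremap(copy, size, size,
                       MREMAP_MAYMOVE | MREMAP_FIXED, original);
    return (res == original) ? 0 : -1;
}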
diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -0,0 +1,296 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +/* XXX this is currently not doing copy-on-write, but simply forces a + copy of all pages as soon as fork() is called. */ + + +static char *fork_big_copy = NULL; +static stm_thread_local_t *fork_this_tl; +static bool fork_was_in_transaction; + +static char *setup_mmap(char *reason); /* forward, in setup.c */ +static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ + + +static bool page_is_null(char *p) +{ + long *q = (long *)p; + long i; + for (i = 0; i < 4096 / sizeof(long); i++) + if (q[i] != 0) + return false; + return true; +} + + +static void forksupport_prepare(void) +{ + if (stm_object_pages == NULL) + return; + + /* So far we attempt to check this by walking all stm_thread_local_t, + marking the one from the current thread, and verifying that it's not + running a transaction. This assumes that the stm_thread_local_t is just + a __thread variable, so never changes threads. + */ + s_mutex_lock(); + + dprintf(("forksupport_prepare\n")); + + stm_thread_local_t *this_tl = NULL; + stm_thread_local_t *tl = stm_all_thread_locals; + do { + if (pthread_equal(*_get_cpth(tl), pthread_self())) { + if (this_tl != NULL) + stm_fatalerror("fork(): found several stm_thread_local_t" + " from the same thread"); + this_tl = tl; + } + tl = tl->next; + } while (tl != stm_all_thread_locals); + + if (this_tl == NULL) + stm_fatalerror("fork(): found no stm_thread_local_t from this thread"); + s_mutex_unlock(); + + bool was_in_transaction = _stm_in_transaction(this_tl); + if (was_in_transaction) { + stm_become_inevitable("fork"); + /* Note that the line above can still fail and abort, which should + be fine */ + } + else { + stm_start_inevitable_transaction(this_tl); + } + + s_mutex_lock(); + synchronize_all_threads(); + mutex_pages_lock(); + + /* Make a new mmap at some other address, but of the same size as + the standard mmap at stm_object_pages + */ + char *big_copy = setup_mmap("stmgc's fork support"); + + /* Copy each of the segment infos into the new mmap, nurseries, + and associated read markers + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *src, *dst; + struct stm_priv_segment_info_s *psrc = get_priv_segment(i); + dst = big_copy + (((char *)psrc) - stm_object_pages); + *(struct stm_priv_segment_info_s *)dst = *psrc; + + src = get_segment_base(i) + FIRST_READMARKER_PAGE * 4096UL; + dst = big_copy + (src - stm_object_pages); + long j; + for (j = 0; j < END_NURSERY_PAGE - FIRST_READMARKER_PAGE; j++) { + if (!page_is_null(src)) + pagecopy(dst, src); + src += 4096; + dst += 4096; + } + } + + /* Copy all the data from the two ranges of objects (large, small) + into the new mmap + */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + if (endpagenum < NB_PAGES) + endpagenum++; /* the next page too, because it might contain + data from largemalloc */ + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - 
stm_object_pages) / 4096UL; + pagenum--; /* the prev page too, because it does contain + data from largemalloc */ + endpagenum = NB_PAGES; + } + + char *src = stm_object_pages + pagenum * 4096UL; + char *dst = big_copy + pagenum * 4096UL; + pagecopy(dst, src); + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + if (ps.by_segment != 0) { + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + src += NB_PAGES * 4096UL; + dst += NB_PAGES * 4096UL; + if (ps.by_segment & (1 << j)) { + pagecopy(dst, src); + } + } + } + pagenum++; + } + + assert(fork_big_copy == NULL); + fork_big_copy = big_copy; + fork_this_tl = this_tl; + fork_was_in_transaction = was_in_transaction; + + assert(_has_mutex()); + printf("forksupport_prepare: from %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0]); +} + +static void forksupport_parent(void) +{ + if (stm_object_pages == NULL) + return; + + printf("forksupport_parent: continuing to run %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0]); + assert(_has_mutex()); + assert(_is_tl_registered(fork_this_tl)); + + /* In the parent, after fork(), we can simply forget about the big copy + that we made for the child. + */ + assert(fork_big_copy != NULL); + munmap(fork_big_copy, TOTAL_MEMORY); + fork_big_copy = NULL; + bool was_in_transaction = fork_was_in_transaction; + + mutex_pages_unlock(); + s_mutex_unlock(); + + if (!was_in_transaction) { + stm_commit_transaction(); + } + + dprintf(("forksupport_parent: continuing to run\n")); +} + +static void fork_abort_thread(long i) +{ + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + dprintf(("forksupport_child: abort in seg%ld\n", i)); + assert(pr->pub.running_thread->associated_segment_num == i); + assert(pr->transaction_state == TS_REGULAR); + set_gs_register(get_segment_base(i)); + + stm_jmpbuf_t jmpbuf; + if (__builtin_setjmp(jmpbuf) == 0) { + pr->pub.jmpbuf_ptr = &jmpbuf; +#ifndef NDEBUG + pr->running_pthread = pthread_self(); +#endif + stm_abort_transaction(); + } +} + +static void forksupport_child(void) +{ + if (stm_object_pages == NULL) + return; + + /* this new process contains no other thread, so we can + just release these locks early */ + mutex_pages_unlock(); + s_mutex_unlock(); + + /* Move the copy of the mmap over the old one, overwriting it + and thus freeing the old mapping in this process + */ + assert(fork_big_copy != NULL); + assert(stm_object_pages != NULL); + void *res = mremap(fork_big_copy, TOTAL_MEMORY, TOTAL_MEMORY, + MREMAP_MAYMOVE | MREMAP_FIXED, + stm_object_pages); + if (res != stm_object_pages) + stm_fatalerror("after fork: mremap failed: %m"); + fork_big_copy = NULL; + + /* Unregister all other stm_thread_local_t, mostly as a way to free + the memory used by the shadowstacks + */ + while (stm_all_thread_locals->next != stm_all_thread_locals) { + if (stm_all_thread_locals == fork_this_tl) + stm_unregister_thread_local(stm_all_thread_locals->next); + else + stm_unregister_thread_local(stm_all_thread_locals); + } + assert(stm_all_thread_locals == fork_this_tl); + + /* Make all pages shared again. 
+ */ + uintptr_t pagenum, endpagenum; + pagenum = END_NURSERY_PAGE; /* starts after the nursery */ + endpagenum = (uninitialized_page_start - stm_object_pages) / 4096UL; + + while (1) { + if (UNLIKELY(pagenum == endpagenum)) { + /* we reach this point usually twice, because there are + more pages after 'uninitialized_page_stop' */ + if (endpagenum == NB_PAGES) + break; /* done */ + pagenum = (uninitialized_page_stop - stm_object_pages) / 4096UL; + endpagenum = NB_PAGES; + if (endpagenum == NB_PAGES) + break; /* done */ + } + + struct page_shared_s ps = pages_privatized[pagenum - PAGE_FLAG_START]; + long j; + for (j = 0; j < NB_SEGMENTS; j++) { + if (!(ps.by_segment & (1 << j))) { + _page_do_reshare(j + 1, pagenum); + } + } + pagenum++; + } + + /* Force the interruption of other running segments + */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + if (pr->pub.running_thread != NULL && + pr->pub.running_thread != fork_this_tl) { + fork_abort_thread(i); + } + } + + /* Restore a few things: the new pthread_self(), and the %gs + register */ + int segnum = fork_this_tl->associated_segment_num; + assert(1 <= segnum && segnum <= NB_SEGMENTS); + *_get_cpth(fork_this_tl) = pthread_self(); + set_gs_register(get_segment_base(segnum)); + assert(STM_SEGMENT->segment_num == segnum); + + if (!fork_was_in_transaction) { + stm_commit_transaction(); + } + + /* Done */ + dprintf(("forksupport_child: running one thread now\n")); +} + + +static void setup_forksupport(void) +{ + static bool fork_support_ready = false; + + if (!fork_support_ready) { + int res = pthread_atfork(forksupport_prepare, forksupport_parent, + forksupport_child); + if (res != 0) + stm_fatalerror("pthread_atfork() failed: %m"); + fork_support_ready = true; + } +} From noreply at buildbot.pypy.org Wed Mar 19 12:06:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 12:06:09 +0100 (CET) Subject: [pypy-commit] pypy default: fix reload/reimport of builtin modules (issue1514) (patch adapted from yamt) Message-ID: <20140319110609.D86EE1C1578@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70089:6002c93c0cc0 Date: 2014-03-19 07:01 -0400 http://bitbucket.org/pypy/pypy/changeset/6002c93c0cc0/ Log: fix reload/reimport of builtin modules (issue1514) (patch adapted from yamt) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -440,10 +440,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -459,10 +460,17 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # And initialize it + # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): - w_mod.init(self) + if not reuse and w_mod.startup_called: + # Create a copy of the module + w_new = self.wrap(Module(self, w_name)) + self.call_method(w_new.getdict(self), 'update', + w_mod.w_initialdict) + w_mod = w_new + else: + w_mod.init(self) # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,7 +579,8 @@ return 
space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,7 +203,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -223,7 +222,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,7 +578,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -586,7 +585,6 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org Wed Mar 19 12:29:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Mar 2014 12:29:19 +0100 (CET) Subject: [pypy-commit] pypy default: fix last commit Message-ID: <20140319112920.019381C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70090:4e0378f68161 Date: 2014-03-19 07:27 -0400 http://bitbucket.org/pypy/pypy/changeset/4e0378f68161/ Log: fix last commit diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -465,6 +465,7 @@ if isinstance(w_mod, Module): if not reuse and w_mod.startup_called: # Create a copy of the module + w_mod.getdict(self) # unlazy w_initialdict w_new = self.wrap(Module(self, w_name)) self.call_method(w_new.getdict(self), 'update', w_mod.w_initialdict) From noreply at buildbot.pypy.org Wed Mar 19 12:57:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 12:57:14 +0100 (CET) Subject: [pypy-commit] stmgc default: update the TODO list Message-ID: <20140319115714.E96951C1578@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1079:6937d94d24cc Date: 2014-03-19 12:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/6937d94d24cc/ Log: update the TODO list diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -1,15 +1,14 @@ -known-working revision: 5e4ec1af0e0c +known-working revision: 75893b92af4e - use small uniform gcpages - write barrier for big arrays -- weakrefs - finalizers - the highest_overflow_number can overflow after 2**30 non-collect-time minor collections -- re-enable the buggy RESHARE_PAGES=1, probably with a better impl +- fork() is done by copying the whole mmap non-lazily; improve. 
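The last TODO entry refers to the copy loop in forksupport_prepare() shown earlier in this log: at fork time every object page is copied eagerly into a second mmap, and only the read-marker and nursery pages get the cheap page_is_null() test that lets untouched ones be skipped. Stripped of the stmgc details, the shape of such an eager copy is roughly the following (illustrative names, not the real stmgc ones):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool page_is_all_zero(const char *p)
{
    static const char zero_page[4096];
    return memcmp(p, zero_page, 4096) == 0;
}

/* Copy 'npages' pages from 'src_base' to 'dst_base', skipping pages
   that are still all zero (the destination mmap starts out zeroed). */
static void eager_page_copy(char *dst_base, const char *src_base,
                            size_t npages)
{
    size_t i;
    for (i = 0; i < npages; i++) {
        const char *src = src_base + i * 4096;
        if (!page_is_all_zero(src))
            memcpy(dst_base + i * 4096, src, 4096);
    }
}

Making this lazy, i.e. copying a page only once the child actually touches it, is the improvement the TODO item is asking for.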
From noreply at buildbot.pypy.org Wed Mar 19 17:04:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 17:04:12 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: update Message-ID: <20140319160412.0BCD21D2824@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70092:ee693a309114 Date: 2014-03-19 17:03 +0100 http://bitbucket.org/pypy/pypy/changeset/ee693a309114/ Log: update diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -28,7 +28,9 @@ ------------------------------------------------------------ -os.fork()! +reintroduce 'stm_ignored', disabled in 4294a7789103 for causing +a rare bug in ll_strhash() shown by http://bpaste.net/show/190868/ +failing after a few minutes (or just running translate.py) ------------------------------------------------------------ From noreply at buildbot.pypy.org Wed Mar 19 17:04:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 17:04:10 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Temporary: disable stm_ignored Message-ID: <20140319160410.9EB241D2822@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70091:4294a7789103 Date: 2014-03-19 16:20 +0100 http://bitbucket.org/pypy/pypy/changeset/4294a7789103/ Log: Temporary: disable stm_ignored diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -53,13 +53,13 @@ "unbalanced stm_ignore_start/stm_ignore_stop in block") def gct_stm_ignored_start(self, hop): - assert not self.in_stm_ignored - self.in_stm_ignored = True + #assert not self.in_stm_ignored + #self.in_stm_ignored = True self.default(hop) def gct_stm_ignored_stop(self, hop): - assert self.in_stm_ignored - self.in_stm_ignored = False + #assert self.in_stm_ignored + #self.in_stm_ignored = False self.default(hop) def var_needs_set_transform(self, var): diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -76,11 +76,11 @@ [op.args[0]], v_none)) transformer.read_barrier_counts += 1 elif op.opname == 'stm_ignored_start': - assert stm_ignored == False - stm_ignored = True + pass #assert stm_ignored == False + #stm_ignored = True elif op.opname == 'stm_ignored_stop': - assert stm_ignored == True - stm_ignored = False + pass #assert stm_ignored == True + #stm_ignored = False newops.append(op) assert stm_ignored == False block.operations = newops From noreply at buildbot.pypy.org Wed Mar 19 17:34:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 17:34:12 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add a "debug_stm" makefile target to compile with STM_DEBUGPRINTs Message-ID: <20140319163412.28A071D2824@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70093:ead53269de59 Date: 2014-03-19 17:33 +0100 http://bitbucket.org/pypy/pypy/changeset/ead53269de59/ Log: Add a "debug_stm" makefile target to compile with STM_DEBUGPRINTs diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -461,6 +461,7 @@ ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT -DRPY_STM_ASSERT" debug_target'), ('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DRPY_ASSERT -DRPY_LL_ASSERT 
-DRPY_STM_ASSERT" debug_target'), + ('debug_stm','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DRPY_ASSERT -DRPY_LL_ASSERT -DRPY_STM_ASSERT -DSTM_DEBUGPRINT" debug_target'), ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), ] if self.has_profopt(): From noreply at buildbot.pypy.org Wed Mar 19 17:41:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Mar 2014 17:41:58 +0100 (CET) Subject: [pypy-commit] pypy default: Yet another attempt to fix these C compiler warnings: don't Message-ID: <20140319164158.3089D1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70094:381eb668a116 Date: 2014-03-19 17:41 +0100 http://bitbucket.org/pypy/pypy/changeset/381eb668a116/ Log: Yet another attempt to fix these C compiler warnings: don't use an enum at all, just declare the type as 'int'. diff --git a/pypy/module/cpyext/include/pystate.h b/pypy/module/cpyext/include/pystate.h --- a/pypy/module/cpyext/include/pystate.h +++ b/pypy/module/cpyext/include/pystate.h @@ -21,9 +21,8 @@ #define Py_END_ALLOW_THREADS PyEval_RestoreThread(_save); \ } -typedef - enum {PyGILState_LOCKED, PyGILState_UNLOCKED} - PyGILState_STATE; +enum {PyGILState_LOCKED, PyGILState_UNLOCKED}; +typedef int PyGILState_STATE; #define PyThreadState_GET() PyThreadState_Get() diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -204,16 +204,14 @@ # Before external call is after running Python rffi.aroundstate.before() -PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', - typedef='PyGILState_STATE', - compilation_info=CConfig._compilation_info_) +PyGILState_STATE = rffi.INT @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) def PyGILState_Ensure(space): if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return lltype.nullptr(PyGILState_STATE.TO) + return rffi.cast(PyGILState_STATE, 0) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): From noreply at buildbot.pypy.org Wed Mar 19 18:13:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 19 Mar 2014 18:13:41 +0100 (CET) Subject: [pypy-commit] pypy default: print as a statement is bad (py3k compat) Message-ID: <20140319171341.C301E1C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70095:c6058cfbd0c1 Date: 2014-03-19 10:12 -0700 http://bitbucket.org/pypy/pypy/changeset/c6058cfbd0c1/ Log: print as a statement is bad (py3k compat) diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -11,7 +11,7 @@ def w_marshal_check(self, case): import marshal, StringIO s = marshal.dumps(case) - print repr(s) + print(repr(s)) x = marshal.loads(s) assert x == case and type(x) is type(case) f = StringIO.StringIO() @@ -173,7 +173,7 @@ import marshal types = (float, complex, int, long, tuple, list, dict, set, frozenset) for cls in types: - print cls + print(cls) class subtype(cls): pass exc = raises(ValueError, marshal.dumps, subtype) From noreply at buildbot.pypy.org Wed Mar 19 20:37:58 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 19 Mar 2014 20:37:58 +0100 (CET) Subject: [pypy-commit] jitviewer default: Backed out changeset e0b82d426d01 - it actually does not run on cpython Message-ID: <20140319193758.CC68F1D2543@cobra.cs.uni-duesseldorf.de> 
Author: Maciej Fijalkowski Branch: Changeset: r257:8488aa916ea4 Date: 2014-03-19 21:37 +0200 http://bitbucket.org/pypy/jitviewer/changeset/8488aa916ea4/ Log: Backed out changeset e0b82d426d01 - it actually does not run on cpython diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -6,5 +6,10 @@ pythonpath = os.path.dirname(os.path.dirname(script_path)) sys.path.append(pythonpath) +# Check we are running with PyPy first. +if not '__pypy__' in sys.builtin_module_names: + from _jitviewer.misc import failout + failout("jitviewer must be run with PyPy") + from _jitviewer.app import main main(sys.argv) From noreply at buildbot.pypy.org Wed Mar 19 20:55:32 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 19 Mar 2014 20:55:32 +0100 (CET) Subject: [pypy-commit] pypy elidable-canfold-exception: failing test Message-ID: <20140319195532.C89A51C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: elidable-canfold-exception Changeset: r70096:361433cd94a2 Date: 2014-03-18 14:16 +0200 http://bitbucket.org/pypy/pypy/changeset/361433cd94a2/ Log: failing test diff --git a/rpython/jit/metainterp/test/test_call.py b/rpython/jit/metainterp/test/test_call.py --- a/rpython/jit/metainterp/test/test_call.py +++ b/rpython/jit/metainterp/test/test_call.py @@ -44,6 +44,25 @@ self.meta_interp(main, [10]) + def test_call_elidable_raise(self): + @jit.elidable(canfolderror=(ValueError,)) + def f(a): + raise ValueError + + driver = jit.JitDriver(greens = [], reds = 'auto') + + def main(n): + while n >= 0: + driver.jit_merge_point() + try: + f(1) + except ValueError: + pass + n -= 1 + + self.meta_interp(main, [10]) + self.check_simple_loop(call=0) + def test_cond_call(self): def f(l, n): l.append(n) From noreply at buildbot.pypy.org Wed Mar 19 20:55:34 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 19 Mar 2014 20:55:34 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: implement interiorfieldsupport for writeanalyzer Message-ID: <20140319195534.3BD3E1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70097:3bf5dd780599 Date: 2014-03-19 20:28 +0200 http://bitbucket.org/pypy/pypy/changeset/3bf5dd780599/ Log: implement interiorfieldsupport for writeanalyzer diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -353,3 +353,23 @@ result = wa.analyze(fgraph.startblock.operations[-1]) assert list(result) == [("struct", lltype.Ptr(S), "x")] + + def test_interiorfield(self): + A = lltype.GcArray(lltype.Struct('x', ('x', lltype.Signed), + ('y', lltype.Signed))) + + def g(x): + a = lltype.malloc(A, 1) + a[0].y = 3 + return f(a, x) + + def f(a, x): + a[0].x = x + return a[0].y + + t, wa = self.translate(g, [int]) + ggraph = graphof(t, g) + result = wa.analyze(ggraph.startblock.operations[-1]) + res = list(result) + assert ('readinteriorfield', lltype.Ptr(A), 'y') in res + assert ('interiorfield', lltype.Ptr(A), 'x') in res diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -45,11 +45,18 @@ elif op.opname == "setarrayitem": if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): return 
self._array_result(op.args[0].concretetype) + elif op.opname == "setinteriorfield": + if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): + return self._interiorfield_result(op.args[0].concretetype, + op.args[2].value) return empty_set def _array_result(self, TYPE): return frozenset([("array", TYPE)]) + def _interiorfield_result(self, TYPE, fieldname): + return frozenset([("interiorfield", TYPE, fieldname)]) + def compute_graph_info(self, graph): return FreshMallocs(graph) @@ -99,4 +106,8 @@ elif op.opname == "getarrayitem": return frozenset([ ("readarray", op.args[0].concretetype)]) + elif op.opname == "getinteriorfield": + return frozenset([ + ("readinteriorfield", op.args[0].concretetype, + op.args[2].value)]) return WriteAnalyzer.analyze_simple_operation(self, op, graphinfo) From noreply at buildbot.pypy.org Wed Mar 19 20:55:35 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 19 Mar 2014 20:55:35 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: sort out the issue with interiorfielddescrs (I think) and dicts Message-ID: <20140319195535.8C6741C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70098:2cf24861ad15 Date: 2014-03-19 21:48 +0200 http://bitbucket.org/pypy/pypy/changeset/2cf24861ad15/ Log: sort out the issue with interiorfielddescrs (I think) and dicts diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -93,12 +93,14 @@ ]) def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, + readonly_descrs_interiorfields, write_descrs_fields, write_descrs_arrays, + write_descrs_interiorfields, extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, call_release_gil_target=llmemory.NULL, - extradescr=None): + extradescrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), @@ -123,18 +125,21 @@ result = object.__new__(cls) result.readonly_descrs_fields = readonly_descrs_fields result.readonly_descrs_arrays = readonly_descrs_arrays + result.readonly_descrs_interiorfields = readonly_descrs_interiorfields if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: result.write_descrs_fields = [] result.write_descrs_arrays = [] + result.write_descrs_interiorfields = [] else: result.write_descrs_fields = write_descrs_fields result.write_descrs_arrays = write_descrs_arrays + result.write_descrs_interiorfields = write_descrs_interiorfields result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex - result.extradescr = extradescr + result.extradescrs = extradescrs result.call_release_gil_target = call_release_gil_target if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE @@ -166,7 +171,7 @@ return None return frozenset(x) -EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, +EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) @@ -181,14 +186,18 @@ if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None readonly_descrs_arrays = None + readonly_descrs_interiorfields = None write_descrs_fields = None write_descrs_arrays = None + write_descrs_interiorfields = None extraeffect = 
EffectInfo.EF_RANDOM_EFFECTS else: readonly_descrs_fields = [] readonly_descrs_arrays = [] + readonly_descrs_interiorfields = [] write_descrs_fields = [] write_descrs_arrays = [] + write_descrs_interiorfields = [] def add_struct(descrs_fields, (_, T, fieldname)): T = deref(T) @@ -202,6 +211,17 @@ descr = cpu.arraydescrof(ARRAY) descrs_arrays.append(descr) + def add_interiorfield(descrs_interiorfields, (_, T, fieldname)): + T = deref(T) + if not isinstance(T, lltype.Array): + return # let's not consider structs for now + if not consider_array(T): + return + if getattr(T.OF, fieldname) is lltype.Void: + return + descr = cpu.interiorfielddescrof(T, fieldname) + descrs_interiorfields.append(descr) + for tup in effects: if tup[0] == "struct": add_struct(write_descrs_fields, tup) @@ -209,6 +229,12 @@ tupw = ("struct",) + tup[1:] if tupw not in effects: add_struct(readonly_descrs_fields, tup) + elif tup[0] == "interiorfield": + add_interiorfield(write_descrs_interiorfields, tup) + elif tup[0] == "readinteriorfield": + tupw = ('interiorfield',) + tup[1:] + if tupw not in effects: + add_interiorfield(readonly_descrs_interiorfields, tup) elif tup[0] == "array": add_array(write_descrs_arrays, tup) elif tup[0] == "readarray": @@ -220,8 +246,10 @@ # return EffectInfo(readonly_descrs_fields, readonly_descrs_arrays, + readonly_descrs_interiorfields, write_descrs_fields, write_descrs_arrays, + write_descrs_interiorfields, extraeffect, oopspecindex, can_invalidate, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1852,10 +1852,12 @@ EffectInfo.EF_ELIDABLE_CANNOT_RAISE) def _handle_dict_lookup_call(self, op, oopspec_name, args): - extradescr = self.cpu.fielddescrof(op.args[1].concretetype.TO, - 'entries') + extradescr1 = self.cpu.fielddescrof(op.args[1].concretetype.TO, + 'entries') + extradescr2 = self.cpu.interiorfielddescrof( + op.args[1].concretetype.TO.entries.TO, 'key') return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, - extradescr=extradescr) + extradescr=[extradescr1, extradescr2]) def _handle_rgc_call(self, op, oopspec_name, args): if oopspec_name == 'rgc.ll_shrink_array': diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -177,6 +177,8 @@ self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} + # cache of corresponding array descrs + self.corresponding_array_descrs = {} # self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False @@ -185,10 +187,12 @@ def force_at_end_of_preamble(self): self.cached_dict_reads.clear() + self.corresponding_array_descrs.clear() self.force_all_lazy_setfields_and_arrayitems() def flush(self): self.cached_dict_reads.clear() + self.corresponding_array_descrs.clear() self.force_all_lazy_setfields_and_arrayitems() self.emit_postponed_op() @@ -301,11 +305,14 @@ self.emit_operation(op) def _optimize_CALL_DICT_LOOKUP(self, op): - descr = op.getdescr().get_extra_info().extradescr - if descr in self.cached_dict_reads: - d = self.cached_dict_reads[descr] + descrs = op.getdescr().get_extra_info().extradescrs + descr1 = descrs[0] + descr2 = descrs[1] + if descr1 in self.cached_dict_reads: + d = self.cached_dict_reads[descr1] else: - d = self.cached_dict_reads[descr] = args_dict() + d = 
self.cached_dict_reads[descr1] = args_dict() + self.corresponding_array_descrs[descr2] = descr1 args = self.optimizer.make_args_key(op) try: res_v = d[args] @@ -339,6 +346,13 @@ self.force_lazy_setfield(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) + for descr in effectinfo.write_descrs_interiorfields: + if descr in self.corresponding_array_descrs: + dictdescr = self.corresponding_array_descrs.pop(descr) + try: + del self.cached_dict_reads[dictdescr] + except KeyError: + pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info self.force_lazy_setfield(vrefinfo.descr_forced) diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -253,6 +253,28 @@ assert res == f(10) self.check_simple_loop(call=5) + def test_dict_array_write_invalidates_caches(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + del d['cc'] + s += d[index] + d['cc'] = 3 + n -= 1 + return s + + exp = f(10) + res = self.meta_interp(f, [10]) + assert res == exp + self.check_simple_loop(call=7) + def test_dict_double_lookup_2(self): driver = JitDriver(greens = [], reds = 'auto') indexes = ['aa', 'b', 'cc'] diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import Variable +from rpython.flowspace.model import Variable, Constant from rpython.translator.backendopt import graphanalyze top_set = object() @@ -37,6 +37,12 @@ return top_set return result1.union(result2) + def _getinteriorname(self, op): + if (isinstance(op.args[1], Constant) and + isinstance(op.args[1].value, str)): + return op.args[1].value + return op.args[2].value + def analyze_simple_operation(self, op, graphinfo): if op.opname == "setfield": if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): @@ -47,8 +53,8 @@ return self._array_result(op.args[0].concretetype) elif op.opname == "setinteriorfield": if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): - return self._interiorfield_result(op.args[0].concretetype, - op.args[2].value) + name = self._getinteriorname(op) + return self._interiorfield_result(op.args[0].concretetype, name) return empty_set def _array_result(self, TYPE): @@ -107,7 +113,7 @@ return frozenset([ ("readarray", op.args[0].concretetype)]) elif op.opname == "getinteriorfield": - return frozenset([ - ("readinteriorfield", op.args[0].concretetype, - op.args[2].value)]) + name = self._getinteriorname(op) + return frozenset([("readinteriorfield", op.args[0].concretetype, + name)]) return WriteAnalyzer.analyze_simple_operation(self, op, graphinfo) From noreply at buildbot.pypy.org Wed Mar 19 21:31:32 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 19 Mar 2014 21:31:32 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: fix Message-ID: <20140319203132.0F04A1D284F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70099:498fe7267231 Date: 2014-03-19 22:30 
+0200 http://bitbucket.org/pypy/pypy/changeset/498fe7267231/ Log: fix diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -302,7 +302,7 @@ self.clear_vable_ptr = self.warmrunnerdesc.helper_func( FUNCPTR, self.clear_vable_token) FUNC = FUNCPTR.TO - ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, can_invalidate=False, oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) From noreply at buildbot.pypy.org Wed Mar 19 21:37:42 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 19 Mar 2014 21:37:42 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: fix some tests Message-ID: <20140319203742.4467D1D288F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70100:89278beb6037 Date: 2014-03-19 22:37 +0200 http://bitbucket.org/pypy/pypy/changeset/89278beb6037/ Log: fix some tests diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -60,7 +60,8 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, + extradescr=None): return 'calldescr' def calldescr_canraise(self, calldescr): return True @@ -117,7 +118,8 @@ self.callinfocollection = FakeCallInfoCollection() def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, + extradescr=None): assert oopspecindex is not None # in this test EI = effectinfo.EffectInfo if oopspecindex != EI.OS_ARRAYCOPY: diff --git a/rpython/jit/codewriter/test/test_list.py b/rpython/jit/codewriter/test/test_list.py --- a/rpython/jit/codewriter/test/test_list.py +++ b/rpython/jit/codewriter/test/test_list.py @@ -37,7 +37,8 @@ class FakeCallControl: class getcalldescr(AbstractDescr): - def __init__(self, op, oopspecindex=0, extraeffect=None): + def __init__(self, op, oopspecindex=0, extraeffect=None, + extradescr=None): self.op = op self.oopspecindex = oopspecindex def __repr__(self): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -181,28 +181,29 @@ plaincalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [])) + EffectInfo([], [], [], [], [], [])) writeadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [])) + EffectInfo([], [], [adescr], [], [], [])) writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [arraydescr])) + EffectInfo([], [], [adescr], [arraydescr], + [], [])) readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([adescr], [], [], [])) + EffectInfo([adescr], [], [], [], [], [])) mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([nextdescr], [], [], [], + EffectInfo([nextdescr], [], [], [], [], [], 
EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE, can_invalidate=True)) arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [arraydescr], [], [arraydescr], + EffectInfo([], [arraydescr], [], [arraydescr], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_ARRAYCOPY)) raw_malloc_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR)) raw_free_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_RAW_FREE)) @@ -251,17 +252,18 @@ _oopspecindex = getattr(EffectInfo, _os) locals()[_name] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=_oopspecindex)) # _oopspecindex = getattr(EffectInfo, _os.replace('STR', 'UNI')) locals()[_name.replace('str', 'unicode')] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=_oopspecindex)) s2u_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], oopspecindex=EffectInfo.OS_STR2UNICODE)) + EffectInfo([], [], [], [], [], [], + oopspecindex=EffectInfo.OS_STR2UNICODE)) # class LoopToken(AbstractDescr): @@ -277,7 +279,7 @@ virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced FUNC = lltype.FuncType([], lltype.Void) - ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, can_invalidate=False, oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) From noreply at buildbot.pypy.org Wed Mar 19 21:43:59 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 19 Mar 2014 21:43:59 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: two more fixes Message-ID: <20140319204359.DB85D1D28C3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70101:20b546accb72 Date: 2014-03-19 22:43 +0200 http://bitbucket.org/pypy/pypy/changeset/20b546accb72/ Log: two more fixes diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -183,10 +183,10 @@ nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], [], [], [])) writeadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [], [], [])) + EffectInfo([], [], [], [adescr], [], [])) writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [arraydescr], - [], [])) + EffectInfo([], [], [], [adescr], [arraydescr], + [])) readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([adescr], [], [], [], [], [])) mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, @@ -194,7 +194,7 @@ EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE, can_invalidate=True)) arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [arraydescr], [], [arraydescr], [], [], + EffectInfo([], [arraydescr], [], [], 
[arraydescr], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_ARRAYCOPY)) From noreply at buildbot.pypy.org Wed Mar 19 23:15:57 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 19 Mar 2014 23:15:57 +0100 (CET) Subject: [pypy-commit] pypy default: win32 compatability Message-ID: <20140319221557.5C8221C00B9@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70102:cbeca026046f Date: 2014-03-20 00:15 +0200 http://bitbucket.org/pypy/pypy/changeset/cbeca026046f/ Log: win32 compatability diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -14,6 +14,11 @@ includes = ['stdio.h', 'sys/types.h'] if os.name == "posix": includes += ['unistd.h'] + ftruncate = 'ftruncate' + fileno = 'fileno' +else: + ftruncate = '_chsize' + fileno = '_fileno' eci = ExternalCompilationInfo(includes=includes) def llexternal(*args, **kwargs): @@ -41,10 +46,10 @@ c_fseek = llexternal('fseek', [lltype.Ptr(FILE), rffi.LONG, rffi.INT], rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) -c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) +c_fileno = llexternal(fileno, [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) -c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) +c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) From noreply at buildbot.pypy.org Thu Mar 20 08:27:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 08:27:40 +0100 (CET) Subject: [pypy-commit] stmgc default: This contention management policy is much better. Document the Message-ID: <20140320072740.9D4FD1C13AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1080:9b94866e95da Date: 2014-03-20 08:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/9b94866e95da/ Log: This contention management policy is much better. Document the problem with the previous one. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -69,6 +69,14 @@ __attribute__((unused)) static void cm_always_wait_for_other_thread(struct contmgr_s *cm) { + /* we tried this contention management, but it seems to have + very bad cases: if thread 1 always reads an object in every + transaction, and thread 2 wants to write this object just + once, then thread 2 will pause when it tries to commit; + it will wait until thread 1 committed; but by the time + thread 2 resumes again, thread 1 has already started the + next transaction and read the object again. 
+ */ cm_abort_the_younger(cm); cm->try_sleep = true; } @@ -110,7 +118,7 @@ #ifdef STM_TESTS cm_abort_the_younger(&contmgr); #else - cm_always_wait_for_other_thread(&contmgr); + cm_pause_if_younger(&contmgr); #endif /* Fix the choices that are found incorrect due to TS_INEVITABLE From noreply at buildbot.pypy.org Thu Mar 20 08:30:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 08:30:38 +0100 (CET) Subject: [pypy-commit] stmgc default: Print a warning to stderr Message-ID: <20140320073038.7AFDF1C1578@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1081:0e5239ae07f2 Date: 2014-03-20 08:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/0e5239ae07f2/ Log: Print a warning to stderr diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -39,6 +39,7 @@ s_mutex_lock(); dprintf(("forksupport_prepare\n")); + fprintf(stderr, "[forking: for now, this operation can take some time]\n"); stm_thread_local_t *this_tl = NULL; stm_thread_local_t *tl = stm_all_thread_locals; From noreply at buildbot.pypy.org Thu Mar 20 08:31:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 08:31:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/0e5239ae07f2 Message-ID: <20140320073152.2261C1C1578@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70103:7ace040eff96 Date: 2014-03-20 08:31 +0100 http://bitbucket.org/pypy/pypy/changeset/7ace040eff96/ Log: import stmgc/0e5239ae07f2 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -75893b92af4e +0e5239ae07f2 diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -70,6 +70,14 @@ __attribute__((unused)) static void cm_always_wait_for_other_thread(struct contmgr_s *cm) { + /* we tried this contention management, but it seems to have + very bad cases: if thread 1 always reads an object in every + transaction, and thread 2 wants to write this object just + once, then thread 2 will pause when it tries to commit; + it will wait until thread 1 committed; but by the time + thread 2 resumes again, thread 1 has already started the + next transaction and read the object again. 
+ */ cm_abort_the_younger(cm); cm->try_sleep = true; } @@ -111,7 +119,7 @@ #ifdef STM_TESTS cm_abort_the_younger(&contmgr); #else - cm_always_wait_for_other_thread(&contmgr); + cm_pause_if_younger(&contmgr); #endif /* Fix the choices that are found incorrect due to TS_INEVITABLE diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -40,6 +40,7 @@ s_mutex_lock(); dprintf(("forksupport_prepare\n")); + fprintf(stderr, "[forking: for now, this operation can take some time]\n"); stm_thread_local_t *this_tl = NULL; stm_thread_local_t *tl = stm_all_thread_locals; @@ -143,8 +144,8 @@ fork_was_in_transaction = was_in_transaction; assert(_has_mutex()); - printf("forksupport_prepare: from %p %p\n", fork_this_tl, - fork_this_tl->creating_pthread[0]); + dprintf(("forksupport_prepare: from %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); } static void forksupport_parent(void) @@ -152,8 +153,8 @@ if (stm_object_pages == NULL) return; - printf("forksupport_parent: continuing to run %p %p\n", fork_this_tl, - fork_this_tl->creating_pthread[0]); + dprintf(("forksupport_parent: continuing to run %p %p\n", fork_this_tl, + fork_this_tl->creating_pthread[0])); assert(_has_mutex()); assert(_is_tl_registered(fork_this_tl)); From noreply at buildbot.pypy.org Thu Mar 20 10:30:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 10:30:58 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Don't produce stm_write() for writes of a Void value. Message-ID: <20140320093058.A0E931C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70104:c0537b11889f Date: 2014-03-20 10:30 +0100 http://bitbucket.org/pypy/pypy/changeset/c0537b11889f/ Log: Don't produce stm_write() for writes of a Void value. diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -71,7 +71,9 @@ v_struct = hop.spaceop.args[0] assert opname in ('setfield', 'setarrayitem', 'setinteriorfield', 'raw_store') - if not var_needsgc(v_struct): + if hop.spaceop.args[-1].concretetype == lltype.Void: + pass # ignore setfields of a Void type + elif not var_needsgc(v_struct): if (var_needsgc(hop.spaceop.args[-1]) and 'is_excdata' not in hop.spaceop.args[0].concretetype.TO._hints): raise Exception("%s: GC pointer written into a non-GC location" From noreply at buildbot.pypy.org Thu Mar 20 11:11:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 11:11:11 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Revert a change from c4 Message-ID: <20140320101111.B518D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70105:d41fc1add33b Date: 2014-03-20 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/d41fc1add33b/ Log: Revert a change from c4 diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -93,22 +93,11 @@ def _make_sure_does_not_move(p): """'p' is a non-null GC object. This (tries to) make sure that the object does not move any more, by forcing collections if needed. - It may return a different addr! 
Warning: should ideally only be used with the minimark GC, and only on objects that are already a bit old, so have a chance to be already non-movable.""" if not we_are_translated(): - if isinstance(p, _GcRef): - return cast_gcref_to_int(p) - else: - from rpython.rtyper.lltypesystem import rffi - return rffi.cast(lltype.Signed, p) - - if stm_is_enabled(): - from rpython.rtyper.lltypesystem.lloperation import llop - res = llop.stm_allocate_nonmovable_int_adr(lltype.Signed, p) - return res - + return i = 0 while can_move(p): if i > 6: @@ -116,8 +105,6 @@ collect(i) i += 1 - return 0 - def _heap_stats(): raise NotImplementedError # can't be run directly diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py --- a/rpython/rlib/test/test_rgc.py +++ b/rpython/rlib/test/test_rgc.py @@ -228,5 +228,3 @@ x1 = X() n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1)) assert n >= 8 and n <= 64 - - From noreply at buildbot.pypy.org Thu Mar 20 11:11:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 11:11:13 +0100 (CET) Subject: [pypy-commit] pypy default: tweaks; reducing the diff with stm Message-ID: <20140320101113.1BC6D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70106:37af0c2ee363 Date: 2014-03-20 11:06 +0100 http://bitbucket.org/pypy/pypy/changeset/37af0c2ee363/ Log: tweaks; reducing the diff with stm diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -287,7 +287,6 @@ cast_instance_to_gcref(self.cpu.propagate_exception_descr)) ofs = self.cpu.get_ofs_of_frame_field('jf_descr') self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) - self.mc.MOV_rr(eax.value, ebp.value) # self._call_footer() rawstart = self.mc.materialize(self.cpu.asmmemmgr, []) @@ -761,6 +760,9 @@ # def _call_footer(self): + # the return value is the jitframe + self.mc.MOV_rr(eax.value, ebp.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) @@ -1827,8 +1829,6 @@ mc.MOV_br(ofs2, eax.value) mc.POP(eax) mc.MOV_br(ofs, eax.value) - # the return value is the jitframe - mc.MOV_rr(eax.value, ebp.value) self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -1861,7 +1861,6 @@ # keep that one and kill all the others ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') self.mc.MOV_bi(ofs, 0) - self.mc.MOV_rr(eax.value, ebp.value) # exit function self._call_footer() From noreply at buildbot.pypy.org Thu Mar 20 11:11:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 11:11:14 +0100 (CET) Subject: [pypy-commit] pypy default: simplify Message-ID: <20140320101114.60E471C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70107:c548a82ba9d3 Date: 2014-03-20 11:07 +0100 http://bitbucket.org/pypy/pypy/changeset/c548a82ba9d3/ Log: simplify diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1825,10 +1825,8 @@ # did just above. 
ofs = self.cpu.get_ofs_of_frame_field('jf_descr') ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.POP(eax) - mc.MOV_br(ofs2, eax.value) - mc.POP(eax) - mc.MOV_br(ofs, eax.value) + mc.POP_b(ofs2) + mc.POP_b(ofs) self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) From noreply at buildbot.pypy.org Thu Mar 20 11:12:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 11:12:45 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Revert changes to 'make_sure_does_not_move(faildescr)' Message-ID: <20140320101245.C44411C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70108:a68436ce38ec Date: 2014-03-20 11:12 +0100 http://bitbucket.org/pypy/pypy/changeset/a68436ce38ec/ Log: Revert changes to 'make_sure_does_not_move(faildescr)' diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -623,7 +623,7 @@ ops_offset = self.mc.ops_offset if logger is not None: - logger.log_loop(inputargs, operations, looptoken.number, "rewritten", + logger.log_loop(inputargs, operations, 0, "rewritten", name=loopname, ops_offset=ops_offset) self.teardown() diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -630,10 +630,10 @@ # the frame is in fp, but we have to point where in the frame is # the potential argument to FINISH descr = op.getdescr() - fail_descr = rgc.cast_instance_to_gcref(descr) + fail_descr = cast_instance_to_gcref(descr) # we know it does not move, but well - fail_descr = rgc._make_sure_does_not_move(fail_descr) - fail_descr = rgc.cast_gcref_to_int(fail_descr) + rgc._make_sure_does_not_move(fail_descr) + fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) locs = [loc, imm(fail_descr)] diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -182,6 +182,8 @@ break exc = guardtok.exc target = self.failure_recovery_code[exc + 2 * withfloats] + fail_descr = cast_instance_to_gcref(guardtok.faildescr) + fail_descr = rffi.cast(lltype.Signed, fail_descr) base_ofs = self.cpu.get_baseofs_of_frame_field() positions = [0] * len(guardtok.fail_locs) for i, loc in enumerate(guardtok.fail_locs): @@ -204,8 +206,6 @@ guardtok.faildescr.rd_locs = positions # we want the descr to keep alive guardtok.faildescr.rd_loop_token = self.current_clt - fail_descr = rgc.cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rgc._make_sure_does_not_move(fail_descr) return fail_descr, target def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc): @@ -236,8 +236,9 @@ else: raise AssertionError(kind) - gcref = rgc.cast_instance_to_gcref(value) - value = rgc._make_sure_does_not_move(gcref) + gcref = cast_instance_to_gcref(value) + rgc._make_sure_does_not_move(gcref) + value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # # Path A: use assembler_helper_adr diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -5,8 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass, rstr from rpython.rtyper.lltypesystem 
import llgroup from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import (llhelper, cast_instance_to_gcref, - cast_base_ptr_to_instance) +from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.jit.codewriter import heaptracker from rpython.jit.metainterp.history import ConstPtr, AbstractDescr @@ -97,17 +96,12 @@ for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): - v.imm_value = rgc._make_sure_does_not_move(v.value) - # XXX: fix for stm, record imm_values and unregister - # them again (below too): - gcrefs_output_list.append(v.value) - - if self.stm: - return # for descr, we do it on the fly in assembler.py + p = v.value + rgc._make_sure_does_not_move(p) + gcrefs_output_list.append(p) if op.is_guard() or op.getopnum() == rop.FINISH: # the only ops with descrs that get recorded in a trace - descr = op.getdescr() - llref = rgc.cast_instance_to_gcref(descr) + llref = cast_instance_to_gcref(op.getdescr()) rgc._make_sure_does_not_move(llref) gcrefs_output_list.append(llref) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -341,8 +341,8 @@ self._store_and_reset_exception(self.mc, eax) ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') self.mc.MOV_br(ofs, eax.value) - propagate_exception_descr = rgc._make_sure_does_not_move( - rgc.cast_instance_to_gcref(self.cpu.propagate_exception_descr)) + propagate_exception_descr = rffi.cast(lltype.Signed, + cast_instance_to_gcref(self.cpu.propagate_exception_descr)) ofs = self.cpu.get_ofs_of_frame_field('jf_descr') self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) # @@ -2298,10 +2298,10 @@ cb.emit() def _store_force_index(self, guard_op): - faildescr = rgc._make_sure_does_not_move( - rgc.cast_instance_to_gcref(guard_op.getdescr())) + faildescr = guard_op.getdescr() ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.MOV(raw_stack(ofs), imm(faildescr)) + self.mc.MOV(raw_stack(ofs), imm(rffi.cast(lltype.Signed, + cast_instance_to_gcref(faildescr)))) def _emit_guard_not_forced(self, guard_token): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -369,9 +369,10 @@ # the frame is in ebp, but we have to point where in the frame is # the potential argument to FINISH descr = op.getdescr() - fail_descr = rgc.cast_instance_to_gcref(descr) + fail_descr = cast_instance_to_gcref(descr) # we know it does not move, but well - fail_descr = rgc._make_sure_does_not_move(fail_descr) + rgc._make_sure_does_not_move(fail_descr) + fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) locs = [loc, imm(fail_descr)] From noreply at buildbot.pypy.org Thu Mar 20 11:17:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 11:17:16 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: stm_ptr_eq dies Message-ID: <20140320101716.76EC11C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70109:1f090ff1b51f Date: 2014-03-20 11:16 +0100 http://bitbucket.org/pypy/pypy/changeset/1f090ff1b51f/ Log: stm_ptr_eq dies diff --git a/rpython/jit/backend/llsupport/gc.py 
b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -706,21 +706,9 @@ if self.stm: # XXX remove the indirections in the following calls from rpython.rlib import rstm - def stm_allocate_nonmovable_int_adr(obj): - return llop1.stm_allocate_nonmovable_int_adr( - lltype.Signed, obj) - self.generate_function('stm_allocate_nonmovable_int_adr', - stm_allocate_nonmovable_int_adr, - [llmemory.GCREF], RESULT=lltype.Signed) self.generate_function('stm_try_inevitable', rstm.become_inevitable, [], RESULT=lltype.Void) - def ptr_eq(x, y): return x == y - def ptr_ne(x, y): return x != y - self.generate_function('stm_ptr_eq', ptr_eq, [llmemory.GCREF] * 2, - RESULT=lltype.Bool) - self.generate_function('stm_ptr_ne', ptr_ne, [llmemory.GCREF] * 2, - RESULT=lltype.Bool) def _bh_malloc(self, sizedescr): from rpython.memory.gctypelayout import check_typeid diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1253,58 +1253,6 @@ self.mc.LEA_rm(result_loc.value, (loc.value, delta)) return genop_binary_or_lea - - def genop_ptr_eq(self, op, arglocs, result_loc): - if not self.cpu.gc_ll_descr.stm: - self.genop_int_eq(op, arglocs, result_loc) - return - assert self.cpu.gc_ll_descr.stm - rl = result_loc.lowest8bits() - self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - self.mc.SET_ir(rx86.Conditions['NZ'], rl.value) - self.mc.MOVZX8_rr(result_loc.value, rl.value) - - def genop_ptr_ne(self, op, arglocs, result_loc): - if not self.cpu.gc_ll_descr.stm: - self.genop_int_ne(op, arglocs, result_loc) - return - assert self.cpu.gc_ll_descr.stm - rl = result_loc.lowest8bits() - self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - self.mc.SET_ir(rx86.Conditions['Z'], rl.value) - self.mc.MOVZX8_rr(result_loc.value, rl.value) - - def genop_guard_ptr_eq(self, op, guard_op, guard_token, - arglocs, result_loc): - if not self.cpu.gc_ll_descr.stm: - self.genop_guard_int_eq(op, guard_op, guard_token, - arglocs, result_loc) - return - assert self.cpu.gc_ll_descr.stm - guard_opnum = guard_op.getopnum() - self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - # p1==p2 -> NZ - if guard_opnum == rop.GUARD_FALSE: - # jump to failure-code if ptrs are equal - self.implement_guard(guard_token, "NZ") - else: - # jump to failure-code if ptrs are not equal - self.implement_guard(guard_token, "Z") - - def genop_guard_ptr_ne(self, op, guard_op, guard_token, - arglocs, result_loc): - if not self.cpu.gc_ll_descr.stm: - self.genop_guard_int_ne(op, guard_op, guard_token, - arglocs, result_loc) - return - assert self.cpu.gc_ll_descr.stm - guard_opnum = guard_op.getopnum() - self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - if guard_opnum == rop.GUARD_FALSE: - self.implement_guard(guard_token, "Z") - else: - self.implement_guard(guard_token, "NZ") - def _cmpop(cond, rev_cond): def genop_cmp(self, op, arglocs, result_loc): rl = result_loc.lowest8bits() @@ -1453,8 +1401,8 @@ genop_int_ne = _cmpop("NE", "NE") genop_int_gt = _cmpop("G", "L") genop_int_ge = _cmpop("GE", "LE") - genop_instance_ptr_eq = genop_ptr_eq - genop_instance_ptr_ne = genop_ptr_ne + genop_ptr_eq = genop_instance_ptr_eq = genop_int_eq + genop_ptr_ne = genop_instance_ptr_ne = genop_int_ne genop_float_lt = _cmpop_float('B', 'A') genop_float_le = _cmpop_float('BE', 'AE') @@ -1474,8 +1422,8 @@ genop_guard_int_ne = _cmpop_guard("NE", "NE", "E", "E") genop_guard_int_gt = 
_cmpop_guard("G", "L", "LE", "GE") genop_guard_int_ge = _cmpop_guard("GE", "LE", "L", "G") - genop_guard_instance_ptr_eq = genop_guard_ptr_eq - genop_guard_instance_ptr_ne = genop_guard_ptr_ne + genop_guard_ptr_eq = genop_guard_instance_ptr_eq = genop_guard_int_eq + genop_guard_ptr_ne = genop_guard_instance_ptr_ne = genop_guard_int_ne genop_guard_uint_gt = _cmpop_guard("A", "B", "BE", "AE") genop_guard_uint_lt = _cmpop_guard("B", "A", "AE", "BE") From noreply at buildbot.pypy.org Thu Mar 20 11:24:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 11:24:11 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: sorry :-/ cond_call_stm_b, stm_set_revision_gc, and more stm_ptr_eq all die Message-ID: <20140320102411.2A4551C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70110:26e595d6e017 Date: 2014-03-20 11:23 +0100 http://bitbucket.org/pypy/pypy/changeset/26e595d6e017/ Log: sorry :-/ cond_call_stm_b, stm_set_revision_gc, and more stm_ptr_eq all die diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1029,9 +1029,6 @@ def execute_cond_call_gc_wb_array(self, descr, a, b): py.test.skip("cond_call_gc_wb_array not supported") - def execute_cond_call_stm_b(self, descr, a): - py.test.skip("cond_call_stm_b not supported") - def execute_stm_transaction_break(self, _, really_wanted): pass diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2374,257 +2374,7 @@ self.mc.overwrite(jmp_location - 1, chr(offset)) # ------------------- END CALL ASSEMBLER ----------------------- - def _stm_ptr_eq_fastpath(self, mc, arglocs, result_loc): - assert self.cpu.gc_ll_descr.stm - assert self.ptr_eq_slowpath is not None - a_base = arglocs[0] - b_base = arglocs[1] - if isinstance(a_base, ImmedLoc): - # make sure there is a non-immed as the first - # argument to mc.CMP(). (2 immeds are caught below) - a_base, b_base = b_base, a_base - - # - # FASTPATH - # - # a == b -> SET NZ - if isinstance(a_base, ImmedLoc) and isinstance(b_base, ImmedLoc): - if a_base.getint() == b_base.getint(): - mc.MOV_ri(X86_64_SCRATCH_REG.value, 1) - mc.TEST(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) # NZ flag - mc.JMP_l8(0) - j_ok1 = mc.get_relative_pos() - else: - j_ok1 = 0 - else: - mc.CMP(a_base, b_base) - # reverse flags: if p1==p2, set NZ - sl = X86_64_SCRATCH_REG.lowest8bits() - mc.SET_ir(rx86.Conditions['Z'], sl.value) - mc.TEST8_rr(sl.value, sl.value) - mc.J_il8(rx86.Conditions['NZ'], 0) - j_ok1 = mc.get_relative_pos() - skip = False - # a == 0 || b == 0 -> SET Z - j_ok2 = 0 - if isinstance(a_base, ImmedLoc): - if a_base.getint() == 0: - # set Z flag: - mc.XOR(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) - skip = True - else: - mc.CMP(a_base, imm(0)) - mc.J_il8(rx86.Conditions['Z'], 0) - j_ok2 = mc.get_relative_pos() - # - j_ok3 = 0 - if not skip: - if isinstance(b_base, ImmedLoc): - if b_base.getint() == 0: - # set Z flag: - mc.XOR(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) - skip = True - else: - mc.CMP(b_base, imm(0)) - mc.J_il8(rx86.Conditions['Z'], 0) - j_ok3 = mc.get_relative_pos() - # a.type != b.type - # XXX: todo, if it ever happens.. 
- - # - # SLOWPATH - # - if not skip: - mc.PUSH(b_base) - mc.PUSH(a_base) - func = self.ptr_eq_slowpath - mc.CALL(imm(func)) - # result still on stack - mc.POP_r(X86_64_SCRATCH_REG.value) - # _Bool return type only sets lower 8 bits of return value - sl = X86_64_SCRATCH_REG.lowest8bits() - mc.TEST8_rr(sl.value, sl.value) - # - # END SLOWPATH - # - - # OK: flags already set - if j_ok1: - offset = mc.get_relative_pos() - j_ok1 - assert 0 <= offset <= 127 - mc.overwrite(j_ok1 - 1, chr(offset)) - if j_ok2: - offset = mc.get_relative_pos() - j_ok2 - assert 0 <= offset <= 127 - mc.overwrite(j_ok2 - 1, chr(offset)) - if j_ok3: - offset = mc.get_relative_pos() - j_ok3 - assert 0 <= offset <= 127 - mc.overwrite(j_ok3 - 1, chr(offset)) - - def _get_stm_private_rev_num_addr(self): - return self._get_stm_tl(rstm.get_adr_of_private_rev_num()) - - def _get_stm_read_barrier_cache_addr(self): - return self._get_stm_tl(rstm.get_adr_of_read_barrier_cache()) - - def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, - align_stack=False): - assert self.cpu.gc_ll_descr.stm - if we_are_translated(): - # tests use a a mock class, but translation needs it - assert isinstance(descr, STMBarrierDescr) - assert descr.returns_modified_object - loc_base = arglocs[0] - assert isinstance(loc_base, RegLoc) - - helper_num = 0 - if is_frame: - helper_num = 4 - elif self._regalloc is not None and self._regalloc.xrm.reg_bindings: - helper_num += 2 - # - # FASTPATH: - # do slowpath IF: - # A2W: - # (obj->h_revision != stm_private_rev_num) - # || (obj->h_tid & GCFLAG_WRITE_BARRIER) != 0) - # V2W: - # (obj->h_tid & GCFLAG_WRITE_BARRIER) != 0) - # A2V: - # (obj->h_revision != stm_private_rev_num) - # A2R: - # (obj->h_revision != stm_private_rev_num) - # && (FXCACHE_AT(obj) != obj))) - # Q2R: - # (obj->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED) != 0) - # A2I: - # (obj->h_tid & GCFLAG_STUB) - if IS_X86_32: # XXX: todo - todo() - jz_location = 0 - jz_location2 = 0 - jnz_location = 0 - # compare h_revision with stm_private_rev_num - if descr.stmcat in ['A2W', 'A2R', 'A2V']: - rn = self._get_stm_private_rev_num_addr() - if we_are_translated(): - # during tests, _get_stm_private_rev_num_addr returns - # an absolute address, not a tl-offset - self._tl_segment_if_stm(mc) - mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) - else: # testing: - mc.MOV(X86_64_SCRATCH_REG, heap(rn)) - - if loc_base == ebp: - mc.CMP_rb(X86_64_SCRATCH_REG.value, StmGC.H_REVISION) - else: - mc.CMP(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_REVISION)) - # - if descr.stmcat in ('A2R', 'A2V'): - # jump to end if h_rev==priv_rev - mc.J_il8(rx86.Conditions['Z'], 0) # patched below - jz_location = mc.get_relative_pos() - else: # A2W - # jump to slowpath if h_rev!=priv_rev - mc.J_il8(rx86.Conditions['NZ'], 0) # patched below - jnz_location = mc.get_relative_pos() - # - # FXCACHE_AT(obj) != obj - if descr.stmcat == 'A2R': - # calculate: temp = obj & FX_MASK - assert StmGC.FX_MASK == 65535 - assert not is_frame - mc.MOVZX16(X86_64_SCRATCH_REG, loc_base) - # calculate: rbc + temp == obj - rbc = self._get_stm_read_barrier_cache_addr() - if we_are_translated(): - # during tests, _get_stm_rbca returns - # an absolute address, not a tl-offset - self._tl_segment_if_stm(mc) - mc.ADD_rj(X86_64_SCRATCH_REG.value, rbc) - else: # testing: - mc.PUSH_r(eax.value) - mc.MOV_ri(eax.value, rbc) - mc.MOV_rm(eax.value, (eax.value, 0)) - mc.ADD(X86_64_SCRATCH_REG, eax) - mc.POP_r(eax.value) - mc.CMP_rm(loc_base.value, (X86_64_SCRATCH_REG.value, 0)) - mc.J_il8(rx86.Conditions['Z'], 
0) # patched below - jz_location2 = mc.get_relative_pos() - # - # check flags: - if descr.stmcat in ['A2W', 'V2W', 'Q2R', 'A2I']: - flags = 0 - off = 0 - if descr.stmcat in ['A2W', 'V2W']: - # obj->h_tid & GCFLAG_WRITE_BARRIER) != 0 - flags = StmGC.GCFLAG_WRITE_BARRIER - elif descr.stmcat == 'Q2R': - # obj->h_tid & PUBLIC_TO_PRIVATE|MOVED - flags = StmGC.GCFLAG_PUBLIC_TO_PRIVATE | StmGC.GCFLAG_MOVED - elif descr.stmcat == 'A2I': - # obj->h_tid & STUB - flags = StmGC.GCFLAG_STUB - - assert IS_X86_64 - if (flags >> 32) > 0 and (flags >> 40) == 0: - flags = flags >> 32 - off = 4 - elif (flags >> 40) > 0 and (flags >> 48) == 0: - flags = flags >> 40 - off = 5 - # - if loc_base == ebp: - mc.TEST8_bi(StmGC.H_TID + off, flags) - else: - mc.TEST8_mi((loc_base.value, StmGC.H_TID + off), flags) - - mc.J_il8(rx86.Conditions['Z'], 0) # patched below - jz_location = mc.get_relative_pos() - # if flags not set, jump to end - # jump target slowpath: - if jnz_location: - offset = mc.get_relative_pos() - jnz_location - assert 0 < offset <= 127 - mc.overwrite(jnz_location - 1, chr(offset)) - # - # SLOWPATH_START - # - if not is_frame: - mc.PUSH(loc_base) - elif is_frame and align_stack: - # ||retadr| - mc.SUB_ri(esp.value, 16 - WORD) # erase the return address - # ||retadr|...|| - func = descr.get_b_slowpath(helper_num) - assert func != 0 - mc.CALL(imm(func)) - # get result: - if is_frame: - # result already written back to ebp - assert loc_base is ebp - else: - # result where argument was: - mc.POP_r(loc_base.value) - - if is_frame and align_stack: - mc.ADD_ri(esp.value, 16 - WORD) # erase the return address - # - # SLOWPATH_END - # - # jump target end: - offset = mc.get_relative_pos() - jz_location - assert 0 < offset <= 127 - mc.overwrite(jz_location - 1, chr(offset)) - if descr.stmcat == 'A2R':#isinstance(descr, STMReadBarrierDescr): - offset = mc.get_relative_pos() - jz_location2 - assert 0 < offset <= 127 - mc.overwrite(jz_location2 - 1, chr(offset)) - - - def _write_barrier_fastpath(self, mc, descr, arglocs, array=False, is_frame=False, align_stack=False): # Write code equivalent to write_barrier() in the GC: it checks @@ -2750,9 +2500,6 @@ self._write_barrier_fastpath(self.mc, op.getdescr(), arglocs, array=True) - def genop_discard_cond_call_stm_b(self, op, arglocs): - self._stm_barrier_fastpath(self.mc, op.getdescr(), arglocs) - def not_implemented_op_discard(self, op, arglocs): not_implemented("not implemented operation: %s" % op.getopname()) @@ -3114,21 +2861,6 @@ assert isinstance(reg, RegLoc) self.mc.MOV_rr(reg.value, ebp.value) - def genop_discard_stm_set_revision_gc(self, op, arglocs): - base_loc, ofs_loc, size_loc = arglocs - assert isinstance(size_loc, ImmedLoc) - mc = self.mc - - if IS_X86_32: - todo() - - rn = self._get_stm_private_rev_num_addr() - self._tl_segment_if_stm(mc) - mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) - - dest_addr = AddressLoc(base_loc, ofs_loc) - mc.MOV(dest_addr, X86_64_SCRATCH_REG) - def genop_guard_stm_transaction_break(self, op, guard_op, guard_token, arglocs, result_loc): assert self.cpu.gc_ll_descr.stm diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -801,21 +801,6 @@ for i in range(N)] self.perform_discard(op, arglocs) - def consider_cond_call_stm_b(self, op): - assert op.result is None - # we force all arguments in a reg (unless they are Consts), - # because it will be needed anyway by the following setfield_gc - # or setarrayitem_gc. 
It avoids loading it twice from the memory. - arg = op.getarg(0) - argloc = self.rm.make_sure_var_in_reg(arg) - self.perform_discard(op, [argloc]) - - # if 'arg' is in two locations (once in argloc and once spilled - # on the frame), we need to ensure that both locations are - # updated with the possibly changed reference. - self.rm.update_spill_loc_if_necessary(arg, argloc) - - consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): @@ -1265,17 +1250,7 @@ if isinstance(loc, FrameLoc): self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc) - - def consider_stm_set_revision_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) - ofs_loc = imm(ofs) - size_loc = imm(size) - assert isinstance(size_loc, ImmedLoc) - args = op.getarglist() - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - self.perform_discard(op, [base_loc, ofs_loc, size_loc]) - def consider_stm_transaction_break(self, op, guard_op): # # only save regs for the should_break_transaction call diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -335,7 +335,6 @@ rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, - rop.COND_CALL_STM_B, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, @@ -347,7 +346,6 @@ rop.CALL_MALLOC_NURSERY_VARSIZE, rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.LABEL, - rop.STM_SET_REVISION_GC, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -499,7 +499,6 @@ 'SETFIELD_RAW/2d', 'STRSETITEM/3', 'UNICODESETITEM/3', - 'COND_CALL_STM_B/1d', # objptr (write/read barrier) 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. 
for array) 'DEBUG_MERGE_POINT/*', # debugging only @@ -511,7 +510,6 @@ 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] 'KEEPALIVE/1', 'STM_TRANSACTION_BREAK/1', - 'STM_SET_REVISION_GC/1d', # not really GC, writes raw to the header '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', From noreply at buildbot.pypy.org Thu Mar 20 11:32:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 11:32:23 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix tests here Message-ID: <20140320103223.B04531D24ED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70111:60e88988d828 Date: 2014-03-20 11:31 +0100 http://bitbucket.org/pypy/pypy/changeset/60e88988d828/ Log: fix tests here diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -207,7 +207,7 @@ T = rffi.CArrayPtr(rffi.TIME_T) external = rffi.llexternal("time", [T], rffi.TIME_T, _nowrapper=True, - threadsafe=False, + releasegil=False, transactionsafe=transactionsafe) @jit.dont_look_inside diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -921,6 +921,7 @@ assert block.exits[0].args == [v1] def test_jit_stm_transaction_break_point(): + py.test.skip("XXX?") op = SpaceOperation('jit_stm_transaction_break_point', [Constant(1, lltype.Signed)], lltype.Void) tr = Transformer() From noreply at buildbot.pypy.org Thu Mar 20 13:16:01 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 20 Mar 2014 13:16:01 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Refactoring to remove rerased and use two separate variables for wrapped and unwrapped storage. Message-ID: <20140320121601.D7D741C08F3@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r669:f63aa764c7bf Date: 2014-03-20 11:39 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/f63aa764c7bf/ Log: Refactoring to remove rerased and use two separate variables for wrapped and unwrapped storage. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -653,16 +653,15 @@ strategy_stats = StrategyStatistics() class W_PointersObject(W_AbstractPointersObject): - _attrs_ = ['_storage', 'strategy'] + _attrs_ = ['_size', '_storage', 'int_storage', 'strategy'] @jit.unroll_safe def __init__(self, space, w_class, size): from spyvm.strategies import strategy_of_size """Create new object with size = fixed + variable size.""" W_AbstractPointersObject.__init__(self, space, w_class, size) - # TODO - setting strategy/storage is useless if fillin() will be called afterwards. 
self.strategy = strategy_of_size(self.s_class, size) - self.set_storage(self.strategy.initial_storage(space, size)) + self.set_storage(space, size) self.log_strategy_operation("Initialized") def log_strategy_operation(self, op, old_strategy=None): @@ -680,11 +679,12 @@ if strategy_stats.do_log: strategy_stats.log_operation(op, new_strategy_tag, old_strategy_tag, classname, size) - def set_storage(self, storage): - self._storage = storage - - def get_storage(self): - return self._storage + def set_storage(self, space, size): + self._size = size + if self.strategy.uses_int_storage: + self.int_storage = self.strategy.initial_int_storage(space, size) + else: + self._storage = self.strategy.initial_storage(space, size) def get_strategy(self): return self.strategy @@ -692,7 +692,11 @@ def fillin_pointers(self, space, collection): from spyvm.strategies import strategy_for_list self.strategy = strategy_for_list(self.s_class, collection) - self.set_storage(self.strategy.storage_for_list(space, collection)) + self._size = len(collection) + if self.strategy.uses_int_storage: + self.int_storage = self.strategy.int_storage_for_list(space, collection) + else: + self._storage = self.strategy.storage_for_list(space, collection) def fillin(self, space, g_self): W_AbstractPointersObject.fillin(self, space, g_self) @@ -701,10 +705,12 @@ def switch_strategy(self, space, new_strategy): assert self.strategy != new_strategy - new_storage = new_strategy.copy_storage_from(space, self, reuse_storage=True) + if new_strategy.uses_int_storage: + self.int_storage = new_strategy.copy_int_storage_from(space, self, reuse_storage=True) + else: + self._storage = new_strategy.copy_storage_from(space, self, reuse_storage=True) old_strategy = self.strategy self.strategy = new_strategy - self.set_storage(new_storage) self.log_strategy_operation("Switched", old_strategy) def store_with_new_strategy(self, space, new_strategy, n0, w_val): @@ -735,15 +741,15 @@ return self.get_strategy().store(space, self, n0, w_value) def basic_size(self): - return self.get_strategy().size_of(self) + return self._size def become(self, w_other): if not isinstance(w_other, W_PointersObject): return False self.strategy, w_other.strategy = w_other.strategy, self.strategy - self_storage = self._storage - self.set_storage(w_other._storage) - w_other.set_storage(self_storage) + self._size, w_other._size = w_other._size, self._size + self._storage, w_other._storage = w_other._storage, self._storage + self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage return W_AbstractPointersObject.become(self, w_other) @jit.unroll_safe diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -165,7 +165,7 @@ from spyvm.strategies import ListStorageStrategy w_nil.space = self w_nil.strategy = ListStorageStrategy.singleton - w_nil.set_storage(w_nil.strategy.initial_storage(self, 0)) + w_nil.set_storage(self, 0) w_nil.s_class = self.classtable['w_UndefinedObject'].as_class_get_penumbra(self) return w_nil w_nil = self.w_nil = patch_nil(model.w_nil) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -432,7 +432,7 @@ self.copy_from_w_self(i) except error.SenderChainManipulation, e: assert e.s_context == self - w_self.set_storage(w_self.strategy.initial_storage(self.space, 0)) + w_self.set_storage(self.space, 0) # def detach_shadow(self): # w_self = self.w_self() diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ 
b/spyvm/strategies.py @@ -14,6 +14,7 @@ _attrs_ = [] _settled_ = True strategy_tag = 'abstract' + uses_int_storage = False def __init__(self): pass @@ -31,14 +32,23 @@ def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") + def initial_int_storage(self, space, size): + raise NotImplementedError("Abstract base class") def storage_for_list(self, space, collection): raise NotImplementedError("Abstract base class") + def int_storage_for_list(self, space, collection): + raise NotImplementedError("Abstract base class") + def copy_int_storage_from(self, space, w_obj, reuse_storage=False): + old_strategy = w_obj.strategy + if old_strategy == self and reuse_storage: + return w_obj.int_storage + else: + # This can be overridden and optimized (reuse_storage flag, less temporary storage) + return self.int_storage_for_list(space, w_obj.fetch_all(space)) def copy_storage_from(self, space, w_obj, reuse_storage=False): old_strategy = w_obj.strategy if old_strategy == self and reuse_storage: - return w_obj.get_storage() - if isinstance(old_strategy, AllNilStorageStrategy): - return self.initial_storage(space, old_strategy.size_of(w_obj)) + return w_obj._storage else: # This can be overridden and optimized (reuse_storage flag, less temporary storage) return self.storage_for_list(space, w_obj.fetch_all(space)) @@ -51,22 +61,24 @@ class BasicStorageStrategyMixin(object): # Concrete class must implement: unerase + def size_of(self, w_obj): + if self.uses_int_storage: + return len(self.int_storage(w_obj)) + else: + return len(self.storage(w_obj)) + def int_storage(self, w_obj): + return w_obj.int_storage def storage(self, w_obj): - return self.unerase(w_obj.get_storage()) - -# This is a container for an int-value to be used with a rerased-pair -class SizeStorage(object): - _attrs_ = ['size'] - _settled_ = True - def __init__(self, size): - self.size = size + return w_obj._storage + def erase(self, a): return a + def unerase(self, a): return a # this is the typical "initial" storage strategy, for when every slot # in a var-sized object is still nil. No storage is allocated except for # holding the size of the object. class AllNilStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta - erase, unerase = rerased.new_static_erasing_pair("all-nil-strategy") + # erase, unerase = rerased.new_static_erasing_pair("all-nil-strategy") import_from_mixin(BasicStorageStrategyMixin) strategy_tag = 'allnil' @@ -82,21 +94,19 @@ return w_obj.store_with_new_strategy(space, TaggingSmallIntegerStorageStrategy.singleton, n0, w_val) return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - def size_of(self, w_obj): - return self.storage(w_obj).size def initial_storage(self, space, size): - return self.erase(SizeStorage(size)) + return [] def storage_for_list(self, space, collection): - return self.erase(SizeStorage(len(collection))) + return [] def copy_storage_from(self, space, w_obj, reuse_storage=False): - return self.erase(SizeStorage(w_obj.basic_size())) + return [] # This is the regular storage strategy that does not result in any # optimizations but can handle every case. Applicable for both # fixed-sized and var-sized objects. 
class ListStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta - erase, unerase = rerased.new_static_erasing_pair("list-storage-strategy") + # erase, unerase = rerased.new_static_erasing_pair("list-storage-strategy") import_from_mixin(BasicStorageStrategyMixin) strategy_tag = 'list' @@ -105,8 +115,6 @@ def store(self, space, w_obj, n0, w_val): # TODO enable generalization by maintaining a counter of elements that are nil. self.storage(w_obj)[n0] = w_val - def size_of(self, w_obj): - return len(self.storage(w_obj)) def erased_list(self, list): make_sure_not_resized(list) return self.erase(list) @@ -117,147 +125,13 @@ def copy_storage_from(self, space, w_obj, reuse_storage=False): length = w_obj.basic_size() return self.erased_list([w_obj.strategy.fetch(space, w_obj, i) for i in range(length)]) - -class DenseStorage(object): - # Subclass must provide attribute: default_element - _immutable_fields_ = ['arr'] - _attrs_ = ['arr', '_from', '_to'] - _settled_ = True - - def __init__(self, _from, _to, size): - self._from = _from # first used index ("inclusive") - self._to = _to # first unused index ("exclusive") - self.arr = [self.default_element] * size - make_sure_not_resized(self.arr) - -class DenseStorageStrategyMixin(object): - # Concrete class must implement: storage, erase, do_fetch, do_store, sparse_strategy - # Concrete class must provide attributes: storage_type (subclass of DenseStorage) - - def fetch(self, space, w_obj, n0): - store = self.storage(w_obj) - if n0 < store._from or n0 >= store._to: - return model.w_nil - return self.do_fetch(space, store.arr, n0) - def store(self, space, w_obj, n0, w_val): - store = self.storage(w_obj) - if not self.can_contain_object(w_val): - if w_val == model.w_nil: - if store._to - 1 == n0: # Optimize Collection >> remove: - store._to = store._to - 1 - elif n0 < store._from or store._to <= n0: - pass # Storing nil to an already-nil position - elif store._from == n0: - store._from = store._from + 1 - else: - # Deletion from the middle of the storage. Deoptimize to sparse storage. - return w_obj.store_with_new_strategy(space, self.sparse_strategy().singleton, n0, w_val) - if store._from == store._to: - # Deleted last element. Generelize to AllNilStorage. - w_obj.switch_strategy(space, AllNilStorageStrategy.singleton) - return - else: - # Storing a non-int - dehomogenize to ListStorage - return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - if n0 == store._to: # Optimize Collection >> add: - store._to = store._to+1 - elif store._from <= n0 and n0 < store._to: - pass - elif n0 == store._from - 1: # It's ok if this wraps around. - store._from = store._from-1 - else: - if store._from == store._to: - # Initial store to non-zero position. - store._from = n0 - store._to = n0+1 - else: - # Store to a non-dense position. Deoptimize to sparse storage. - return w_obj.store_with_new_strategy(space, self.sparse_strategy().singleton, n0, w_val) - # It is a dense store, so finally store the unwrapped value. 
- self.do_store(space, store.arr, n0, w_val) - def initial_storage(self, space, size): - return self.erase(self.storage_type(0, 0, size)) - def storage_for_list(self, space, collection): - _from = 0 - while _from < len(collection) and collection[_from] == model.w_nil: - _from = _from+1 - _to = _from - while _to < len(collection) and collection[_to] != model.w_nil: - _to = _to+1 - store = self.storage_type(_from, _to, len(collection)) - for i in range(_from, _to): - self.do_store(space, store.arr, i, collection[i]) - return self.erase(store) - -class SparseStorage(object): - _immutable_fields_ = ['arr', 'nil_flags'] - _attrs_ = ['arr', 'nil_flags'] - _settled_ = True - - def __init__(self, arr, nil_flags): - self.arr = arr - self.nil_flags = nil_flags - make_sure_not_resized(self.arr) - make_sure_not_resized(self.nil_flags) - -class SparseStorageStrategyMixin(object): - # Concrete class must implement: storage, erase, do_fetch, do_store, dense_strategy - # Concrete class must provide attributes: storage_type (Subclass of SparseStorage) - - def fetch(self, space, w_obj, n0): - store = self.storage(w_obj) - if store.nil_flags[n0]: - return model.w_nil - return self.do_fetch(space, store.arr, n0) - def store(self, space, w_obj, n0, w_val): - store = self.storage(w_obj) - if not self.can_contain_object(w_val): - if w_val == model.w_nil: - # TODO - generelize to AllNilStorage by maintaining a counter of nil-elements - store.nil_flags[n0] = True - return - else: - # Storing a wrong type - dehomogenize to ListStorage - return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - store.nil_flags[n0] = False - self.do_store(space, store.arr, n0, w_val) - def storage_for_size(self, size): - # TODO -- for inlining strategy, the size must be extended!! 
- # size = size * self.slots_per_object() - return self.storage_type([self.storage_type.default_element] * size, [True] * size) - def initial_storage(self, space, size): - return self.erase(self.storage_for_size(size)) - def storage_for_list(self, space, collection): - length = len(collection) - store = self.storage_for_size(length) - for i in range(length): - if collection[i] != model.w_nil: - store.nil_flags[i] = False - self.do_store(space, store.arr, i, collection[i]) - return self.erase(store) - def copy_storage_from(self, space, w_obj, reuse_storage=False): - old_strategy = w_obj.strategy - if isinstance(old_strategy, self.dense_strategy()): - # Optimized transition from dense to sparse strategy - store = old_strategy.storage(w_obj) - return self.erase(self.copy_from_dense_storage(store, reuse_storage)) - else: - return AbstractStorageStrategy.copy_storage_from(self, space, w_obj, reuse_storage) - def copy_from_dense_storage(self, store, reuse_storage): - # TODO possible optimization: compare len(arr) with _to-_from, use smaller iteration size - nil_flags = [True] * len(store.arr) - for i in range(store._from, store._to): - nil_flags[i] = False - arr = store.arr - if not reuse_storage: - arr = [x for x in arr] - return self.storage_type(arr, nil_flags) class TaggingSmallIntegerStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta strategy_tag = 'tagging-small-int' - erase, unerase = rerased.new_static_erasing_pair("tagging-small-integer-strategry") + # erase, unerase = rerased.new_static_erasing_pair("tagging-small-integer-strategry") import_from_mixin(BasicStorageStrategyMixin) + uses_int_storage = True @staticmethod def wrap(val): @@ -266,9 +140,6 @@ def unwrap(val): return val >> 1 @staticmethod - def is_nil(val): - return (val & 1) == 1 - @staticmethod def can_contain(w_val): return isinstance(w_val, model.W_SmallInteger) # TODO - use just a single value to represent nil (max_int-1) @@ -280,35 +151,32 @@ return True def fetch(self, space, w_obj, n0): - val = self.storage(w_obj)[n0] - if (self.is_nil(val)): + val = self.int_storage(w_obj)[n0] + if val == self.nil_value: return space.w_nil else: return space.wrap_int(self.unwrap(val)) def store(self, space, w_obj, n0, w_val): - store = self.storage(w_obj) + store = self.int_storage(w_obj) if self.can_contain(w_val): store[n0] = self.wrap(space.unwrap_int(w_val)) else: - if w_val == model.w_nil: + if w_val == space.w_nil: # TODO - generelize to AllNilStorage by maintaining a counter of nil-elements store[n0] = self.nil_value else: # Storing a wrong type - dehomogenize to ListStorage return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - def size_of(self, w_obj): - return len(self.storage(w_obj)) - - def initial_storage(self, space, size): + def initial_int_storage(self, space, size): return self.erase([self.nil_value] * size) - def storage_for_list(self, space, collection): + def int_storage_for_list(self, space, collection): length = len(collection) store = [self.nil_value] * length for i in range(length): - if collection[i] != model.w_nil: + if collection[i] != space.w_nil: store[i] = self.wrap(space.unwrap_int(collection[i])) return self.erase(store) @@ -324,8 +192,8 @@ def strategy_for_list(s_containing_class, vars): if s_containing_class is None: - # This is a weird and rare special case for w_nil - return ListStorageStrategy.singleton + # This is a weird and rare special case for w_nil + return ListStorageStrategy.singleton try: is_variable = s_containing_class.isvariable() 
except AttributeError: diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -16,7 +16,7 @@ def __init__(self, stack): size = 6 + len(stack) + 6 self.strategy = strategies.ListStorageStrategy.singleton - self.set_storage(self.strategy.initial_storage(space, size)) + self.set_storage(space, size) self.store_all(space, [None] * 6 + stack + [space.w_nil] * 6) s_self = self.as_blockcontext_get_shadow() s_self.init_stack_and_temps() From noreply at buildbot.pypy.org Thu Mar 20 13:16:03 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 20 Mar 2014 13:16:03 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Renamed set_storage to initialize_storage and _storage to list_storage. Message-ID: <20140320121603.0F4671C08F3@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r670:dcaa168e8fe8 Date: 2014-03-20 11:53 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/dcaa168e8fe8/ Log: Renamed set_storage to initialize_storage and _storage to list_storage. Moved the special casing based on uses_int_storage into the strategy base class. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -653,7 +653,7 @@ strategy_stats = StrategyStatistics() class W_PointersObject(W_AbstractPointersObject): - _attrs_ = ['_size', '_storage', 'int_storage', 'strategy'] + _attrs_ = ['_size', 'list_storage', 'int_storage', 'strategy'] @jit.unroll_safe def __init__(self, space, w_class, size): @@ -661,7 +661,7 @@ """Create new object with size = fixed + variable size.""" W_AbstractPointersObject.__init__(self, space, w_class, size) self.strategy = strategy_of_size(self.s_class, size) - self.set_storage(space, size) + self.initialize_storage(space, size) self.log_strategy_operation("Initialized") def log_strategy_operation(self, op, old_strategy=None): @@ -679,24 +679,15 @@ if strategy_stats.do_log: strategy_stats.log_operation(op, new_strategy_tag, old_strategy_tag, classname, size) - def set_storage(self, space, size): + def initialize_storage(self, space, size): self._size = size - if self.strategy.uses_int_storage: - self.int_storage = self.strategy.initial_int_storage(space, size) - else: - self._storage = self.strategy.initial_storage(space, size) - - def get_strategy(self): - return self.strategy + self.strategy.set_initial_storage(space, self, size) def fillin_pointers(self, space, collection): from spyvm.strategies import strategy_for_list self.strategy = strategy_for_list(self.s_class, collection) self._size = len(collection) - if self.strategy.uses_int_storage: - self.int_storage = self.strategy.int_storage_for_list(space, collection) - else: - self._storage = self.strategy.storage_for_list(space, collection) + self.strategy.set_storage_for_list(space, self, collection) def fillin(self, space, g_self): W_AbstractPointersObject.fillin(self, space, g_self) @@ -705,10 +696,7 @@ def switch_strategy(self, space, new_strategy): assert self.strategy != new_strategy - if new_strategy.uses_int_storage: - self.int_storage = new_strategy.copy_int_storage_from(space, self, reuse_storage=True) - else: - self._storage = new_strategy.copy_storage_from(space, self, reuse_storage=True) + new_strategy.set_storage_copied_from(space, self, self, reuse_storage=True) old_strategy = self.strategy self.strategy = new_strategy self.log_strategy_operation("Switched", old_strategy) @@ -735,10 +723,10 @@ i = i+1 def _fetch(self, space, 
n0): - return self.get_strategy().fetch(space, self, n0) + return self.strategy.fetch(space, self, n0) def _store(self, space, n0, w_value): - return self.get_strategy().store(space, self, n0, w_value) + return self.strategy.store(space, self, n0, w_value) def basic_size(self): return self._size @@ -748,7 +736,7 @@ return False self.strategy, w_other.strategy = w_other.strategy, self.strategy self._size, w_other._size = w_other._size, self._size - self._storage, w_other._storage = w_other._storage, self._storage + self.list_storage, w_other.list_storage = w_other.list_storage, self.list_storage self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage return W_AbstractPointersObject.become(self, w_other) diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -165,7 +165,7 @@ from spyvm.strategies import ListStorageStrategy w_nil.space = self w_nil.strategy = ListStorageStrategy.singleton - w_nil.set_storage(self, 0) + w_nil.initialize_storage(self, 0) w_nil.s_class = self.classtable['w_UndefinedObject'].as_class_get_penumbra(self) return w_nil w_nil = self.w_nil = patch_nil(model.w_nil) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -432,7 +432,7 @@ self.copy_from_w_self(i) except error.SenderChainManipulation, e: assert e.s_context == self - w_self.set_storage(self.space, 0) + w_self.initialize_storage(self.space, 0) # def detach_shadow(self): # w_self = self.w_self() diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -1,8 +1,6 @@ from spyvm import model, shadow -from rpython.rlib import rerased -from rpython.rlib import objectmodel, jit, signature -from rpython.rlib.listsort import TimSort +from rpython.rlib import rerased, objectmodel, jit, signature from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import make_sure_not_resized @@ -23,6 +21,24 @@ # If not, the space-parameter can be passed in as None (probably). 
return False + def set_initial_storage(self, space, w_obj, size): + if self.uses_int_storage: + w_obj.int_storage = self.initial_int_storage(space, size) + else: + w_obj.list_storage = self.initial_storage(space, size) + + def set_storage_for_list(self, space, w_obj, collection): + if self.uses_int_storage: + w_obj.int_storage = self.int_storage_for_list(space, collection) + else: + w_obj.list_storage = self.storage_for_list(space, collection) + + def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): + if self.uses_int_storage: + w_obj.int_storage = self.int_storage_for_list(space, collection) + else: + w_obj.list_storage = self.storage_for_list(space, collection) + def fetch(self, space, w_obj, n0): raise NotImplementedError("Abstract base class") def store(self, space, w_obj, n0, w_val): @@ -48,7 +64,7 @@ def copy_storage_from(self, space, w_obj, reuse_storage=False): old_strategy = w_obj.strategy if old_strategy == self and reuse_storage: - return w_obj._storage + return w_obj.list_storage else: # This can be overridden and optimized (reuse_storage flag, less temporary storage) return self.storage_for_list(space, w_obj.fetch_all(space)) @@ -69,7 +85,7 @@ def int_storage(self, w_obj): return w_obj.int_storage def storage(self, w_obj): - return w_obj._storage + return w_obj.list_storage def erase(self, a): return a def unerase(self, a): return a @@ -128,7 +144,7 @@ class TaggingSmallIntegerStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta - strategy_tag = 'tagging-small-int' + strategy_tag = 'tagging' # erase, unerase = rerased.new_static_erasing_pair("tagging-small-integer-strategry") import_from_mixin(BasicStorageStrategyMixin) uses_int_storage = True diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -16,7 +16,7 @@ def __init__(self, stack): size = 6 + len(stack) + 6 self.strategy = strategies.ListStorageStrategy.singleton - self.set_storage(space, size) + self.initialize_storage(space, size) self.store_all(space, [None] * 6 + stack + [space.w_nil] * 6) s_self = self.as_blockcontext_get_shadow() s_self.init_stack_and_temps() From noreply at buildbot.pypy.org Thu Mar 20 13:16:04 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 20 Mar 2014 13:16:04 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixed refactoring. Message-ID: <20140320121604.3AEC41C08F3@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r671:3b7a82b8f4e1 Date: 2014-03-20 11:55 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/3b7a82b8f4e1/ Log: Fixed refactoring. 
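As a brief aside for readers following this refactoring from outside the repository: the direction of these changesets is that the strategy object, rather than W_PointersObject itself, decides which backing field it allocates and fills (list_storage for wrapped objects, int_storage for raw machine integers). The following is only a minimal illustrative sketch of that shape in plain Python, with invented class names, not the actual spyvm code:

    class Strategy(object):
        # illustrative base class: each subclass owns one kind of backing store
        def set_initial_storage(self, obj, size):
            raise NotImplementedError

    class BoxedListStrategy(Strategy):
        def set_initial_storage(self, obj, size):
            obj.list_storage = [None] * size      # one wrapped object per slot

    class UnboxedIntStrategy(Strategy):
        def set_initial_storage(self, obj, size):
            obj.int_storage = [0] * size          # raw ints, no wrapper objects

    class Obj(object):
        list_storage = None                       # defaults for whichever field a strategy never touches
        int_storage = None
        def __init__(self, strategy, size):
            self.strategy = strategy
            strategy.set_initial_storage(self, size)

    assert Obj(BoxedListStrategy(), 3).list_storage == [None, None, None]
    assert Obj(UnboxedIntStrategy(), 3).int_storage == [0, 0, 0]

Keeping this dispatch inside the strategy hierarchy is what lets a later changeset below drop the uses_int_storage special casing in favor of plain subclassing.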
diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -35,9 +35,9 @@ def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): if self.uses_int_storage: - w_obj.int_storage = self.int_storage_for_list(space, collection) + w_obj.int_storage = self.copy_int_storage_from(space, w_source_obj, reuse_storage) else: - w_obj.list_storage = self.storage_for_list(space, collection) + w_obj.list_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) def fetch(self, space, w_obj, n0): raise NotImplementedError("Abstract base class") @@ -54,14 +54,14 @@ raise NotImplementedError("Abstract base class") def int_storage_for_list(self, space, collection): raise NotImplementedError("Abstract base class") - def copy_int_storage_from(self, space, w_obj, reuse_storage=False): + def copy_int_storage_from(self, space, w_obj, reuse_storage): old_strategy = w_obj.strategy if old_strategy == self and reuse_storage: return w_obj.int_storage else: # This can be overridden and optimized (reuse_storage flag, less temporary storage) return self.int_storage_for_list(space, w_obj.fetch_all(space)) - def copy_storage_from(self, space, w_obj, reuse_storage=False): + def copy_storage_from(self, space, w_obj, reuse_storage): old_strategy = w_obj.strategy if old_strategy == self and reuse_storage: return w_obj.list_storage From noreply at buildbot.pypy.org Thu Mar 20 13:16:05 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 20 Mar 2014 13:16:05 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Replaced uses_int_storage special casing in favor of subclassing. Message-ID: <20140320121605.7F2731C08F3@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r672:fd9669dc8f6d Date: 2014-03-20 12:17 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/fd9669dc8f6d/ Log: Replaced uses_int_storage special casing in favor of subclassing. Removed size_of from the strategy interface. Removed make_sure_not_resized. Turned needs_objspace into an attribute. Removed unused imports. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1268,7 +1268,7 @@ if isinstance(w_candidate, W_PointersObject): c_shadow = w_candidate._get_shadow() if c_shadow is None and w_candidate.size() >= 2: - if not w_candidate.strategy.needs_objspace(): + if not w_candidate.strategy.needs_objspace: # We can fetch without having an object space at hand. # XXX How to get an object space from a CompiledMethodShadow, anyways? w_class = w_candidate._fetch(None, 1) diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -1,8 +1,7 @@ from spyvm import model, shadow -from rpython.rlib import rerased, objectmodel, jit, signature +from rpython.rlib import rerased from rpython.rlib.objectmodel import import_from_mixin -from rpython.rlib.debug import make_sure_not_resized # Disables all optimized strategies, for debugging. only_list_storage = False @@ -12,55 +11,36 @@ _attrs_ = [] _settled_ = True strategy_tag = 'abstract' - uses_int_storage = False - - def __init__(self): - pass - def needs_objspace(self): - # Return True, if fetch/store operations use the space parameter. - # If not, the space-parameter can be passed in as None (probably). 
- return False + needs_objspace = False def set_initial_storage(self, space, w_obj, size): - if self.uses_int_storage: - w_obj.int_storage = self.initial_int_storage(space, size) - else: - w_obj.list_storage = self.initial_storage(space, size) - + raise NotImplementedError("Abstract base class") def set_storage_for_list(self, space, w_obj, collection): - if self.uses_int_storage: - w_obj.int_storage = self.int_storage_for_list(space, collection) - else: - w_obj.list_storage = self.storage_for_list(space, collection) - + raise NotImplementedError("Abstract base class") def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - if self.uses_int_storage: - w_obj.int_storage = self.copy_int_storage_from(space, w_source_obj, reuse_storage) - else: - w_obj.list_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) + raise NotImplementedError("Abstract base class") def fetch(self, space, w_obj, n0): raise NotImplementedError("Abstract base class") def store(self, space, w_obj, n0, w_val): raise NotImplementedError("Abstract base class") - def size_of(self, w_obj): - raise NotImplementedError("Abstract base class") + +class AbstractListStorageStrategy(AbstractStorageStrategy): + strategy_tag = 'abstract-list' + + def storage(self, w_obj): + return w_obj.list_storage + def set_initial_storage(self, space, w_obj, size): + w_obj.list_storage = self.initial_storage(space, size) + def set_storage_for_list(self, space, w_obj, collection): + w_obj.list_storage = self.storage_for_list(space, collection) + def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): + w_obj.list_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") - def initial_int_storage(self, space, size): - raise NotImplementedError("Abstract base class") def storage_for_list(self, space, collection): raise NotImplementedError("Abstract base class") - def int_storage_for_list(self, space, collection): - raise NotImplementedError("Abstract base class") - def copy_int_storage_from(self, space, w_obj, reuse_storage): - old_strategy = w_obj.strategy - if old_strategy == self and reuse_storage: - return w_obj.int_storage - else: - # This can be overridden and optimized (reuse_storage flag, less temporary storage) - return self.int_storage_for_list(space, w_obj.fetch_all(space)) def copy_storage_from(self, space, w_obj, reuse_storage): old_strategy = w_obj.strategy if old_strategy == self and reuse_storage: @@ -68,6 +48,30 @@ else: # This can be overridden and optimized (reuse_storage flag, less temporary storage) return self.storage_for_list(space, w_obj.fetch_all(space)) + +class AbstractIntStorageStrategy(AbstractStorageStrategy): + strategy_tag = 'abstract-int' + + def storage(self, w_obj): + return w_obj.int_storage + def set_initial_storage(self, space, w_obj, size): + w_obj.int_storage = self.initial_storage(space, size) + def set_storage_for_list(self, space, w_obj, collection): + w_obj.int_storage = self.storage_for_list(space, collection) + def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): + w_obj.int_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) + + def initial_storage(self, space, size): + raise NotImplementedError("Abstract base class") + def storage_for_list(self, space, collection): + raise NotImplementedError("Abstract base class") + def copy_storage_from(self, space, w_obj, reuse_storage): + 
old_strategy = w_obj.strategy + if old_strategy == self and reuse_storage: + return w_obj.int_storage + else: + # This can be overridden and optimized (reuse_storage flag, less temporary storage) + return self.int_storage_for_list(space, w_obj.fetch_all(space)) class SingletonMeta(type): def __new__(cls, name, bases, dct): @@ -76,25 +80,15 @@ return result class BasicStorageStrategyMixin(object): - # Concrete class must implement: unerase - def size_of(self, w_obj): - if self.uses_int_storage: - return len(self.int_storage(w_obj)) - else: - return len(self.storage(w_obj)) - def int_storage(self, w_obj): - return w_obj.int_storage - def storage(self, w_obj): - return w_obj.list_storage def erase(self, a): return a def unerase(self, a): return a + # erase, unerase = rerased.new_static_erasing_pair(self.strategy_tag) # this is the typical "initial" storage strategy, for when every slot -# in a var-sized object is still nil. No storage is allocated except for -# holding the size of the object. +# in an object is still nil. No storage is allocated. class AllNilStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta - # erase, unerase = rerased.new_static_erasing_pair("all-nil-strategy") + # erase, unerase = rerased.new_static_erasing_pair("allnil-strategy") import_from_mixin(BasicStorageStrategyMixin) strategy_tag = 'allnil' @@ -110,19 +104,19 @@ return w_obj.store_with_new_strategy(space, TaggingSmallIntegerStorageStrategy.singleton, n0, w_val) return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - def initial_storage(self, space, size): - return [] - def storage_for_list(self, space, collection): - return [] - def copy_storage_from(self, space, w_obj, reuse_storage=False): - return [] + def set_initial_storage(self, space, w_obj, size): + pass + def set_storage_for_list(self, space, w_obj, collection): + pass + def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): + pass # This is the regular storage strategy that does not result in any # optimizations but can handle every case. Applicable for both # fixed-sized and var-sized objects. -class ListStorageStrategy(AbstractStorageStrategy): +class ListStorageStrategy(AbstractListStorageStrategy): __metaclass__ = SingletonMeta - # erase, unerase = rerased.new_static_erasing_pair("list-storage-strategy") + # erase, unerase = rerased.new_static_erasing_pair("list-strategy") import_from_mixin(BasicStorageStrategyMixin) strategy_tag = 'list' @@ -131,23 +125,20 @@ def store(self, space, w_obj, n0, w_val): # TODO enable generalization by maintaining a counter of elements that are nil. 
self.storage(w_obj)[n0] = w_val - def erased_list(self, list): - make_sure_not_resized(list) - return self.erase(list) def initial_storage(self, space, size): - return self.erased_list([model.w_nil] * size) + return [model.w_nil] * size def storage_for_list(self, space, collection): - return self.erased_list([x for x in collection]) + return [x for x in collection] def copy_storage_from(self, space, w_obj, reuse_storage=False): length = w_obj.basic_size() - return self.erased_list([w_obj.strategy.fetch(space, w_obj, i) for i in range(length)]) + return [w_obj.strategy.fetch(space, w_obj, i) for i in range(length)] -class TaggingSmallIntegerStorageStrategy(AbstractStorageStrategy): +class TaggingSmallIntegerStorageStrategy(AbstractIntStorageStrategy): __metaclass__ = SingletonMeta - strategy_tag = 'tagging' - # erase, unerase = rerased.new_static_erasing_pair("tagging-small-integer-strategry") + # erase, unerase = rerased.new_static_erasing_pair("tagging-smallint-strategry") import_from_mixin(BasicStorageStrategyMixin) - uses_int_storage = True + strategy_tag = 'tagging-smallint' + needs_objspace = True @staticmethod def wrap(val): @@ -163,18 +154,15 @@ # also store W_LargePositiveInteger1Word? nil_value = 1 - def needs_objspace(self): - return True - def fetch(self, space, w_obj, n0): - val = self.int_storage(w_obj)[n0] + val = self.storage(w_obj)[n0] if val == self.nil_value: return space.w_nil else: return space.wrap_int(self.unwrap(val)) def store(self, space, w_obj, n0, w_val): - store = self.int_storage(w_obj) + store = self.storage(w_obj) if self.can_contain(w_val): store[n0] = self.wrap(space.unwrap_int(w_val)) else: @@ -186,7 +174,7 @@ return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) def initial_int_storage(self, space, size): - return self.erase([self.nil_value] * size) + return [self.nil_value] * size def int_storage_for_list(self, space, collection): length = len(collection) @@ -194,7 +182,7 @@ for i in range(length): if collection[i] != space.w_nil: store[i] = self.wrap(space.unwrap_int(collection[i])) - return self.erase(store) + return store def strategy_of_size(s_containing_class, size): if s_containing_class is None: From noreply at buildbot.pypy.org Thu Mar 20 13:16:06 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 20 Mar 2014 13:16:06 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixing tests. Message-ID: <20140320121606.AB4161C08F3@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r673:b1593fffd1e7 Date: 2014-03-20 12:54 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/b1593fffd1e7/ Log: Fixing tests. 
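For context on the TaggingSmallIntegerStorageStrategy reworked above: SmallIntegers are kept unboxed in a list of machine integers by shifting each value left one bit (wrap), shifting right to read it back (unwrap), and reserving the odd value 1 (nil_value) for empty slots. A rough standalone illustration of just that tagging arithmetic, in ordinary Python and with none of the spyvm types, is:

    NIL = 1                          # odd sentinel; tagged values are always even

    def tag(value):
        return value << 1            # cannot collide with NIL

    def untag(tagged):
        return tagged >> 1

    def fetch(storage, index):
        tagged = storage[index]
        return None if tagged == NIL else untag(tagged)

    def store(storage, index, value):
        storage[index] = NIL if value is None else tag(value)

    storage = [NIL] * 4              # a freshly created object reads as all nil
    store(storage, 2, 21)
    assert fetch(storage, 2) == 21 and fetch(storage, 0) is None

The real strategy additionally falls back to ListStorageStrategy when something other than a SmallInteger is stored, as the store() method in the diffs above shows; this sketch leaves that part out.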
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -654,7 +654,9 @@ class W_PointersObject(W_AbstractPointersObject): _attrs_ = ['_size', 'list_storage', 'int_storage', 'strategy'] - + list_storage = None + int_storage = None + @jit.unroll_safe def __init__(self, space, w_class, size): from spyvm.strategies import strategy_of_size diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -44,7 +44,7 @@ def copy_storage_from(self, space, w_obj, reuse_storage): old_strategy = w_obj.strategy if old_strategy == self and reuse_storage: - return w_obj.list_storage + return self.storage(w_obj) else: # This can be overridden and optimized (reuse_storage flag, less temporary storage) return self.storage_for_list(space, w_obj.fetch_all(space)) @@ -68,10 +68,10 @@ def copy_storage_from(self, space, w_obj, reuse_storage): old_strategy = w_obj.strategy if old_strategy == self and reuse_storage: - return w_obj.int_storage + return self.storage(w_obj) else: # This can be overridden and optimized (reuse_storage flag, less temporary storage) - return self.int_storage_for_list(space, w_obj.fetch_all(space)) + return self.storage_for_list(space, w_obj.fetch_all(space)) class SingletonMeta(type): def __new__(cls, name, bases, dct): @@ -173,10 +173,10 @@ # Storing a wrong type - dehomogenize to ListStorage return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - def initial_int_storage(self, space, size): + def initial_storage(self, space, size): return [self.nil_value] * size - def int_storage_for_list(self, space, collection): + def storage_for_list(self, space, collection): length = len(collection) store = [self.nil_value] * length for i in range(length): diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -18,15 +18,17 @@ self.strategy = strategies.ListStorageStrategy.singleton self.initialize_storage(space, size) self.store_all(space, [None] * 6 + stack + [space.w_nil] * 6) - s_self = self.as_blockcontext_get_shadow() + import pdb; pdb.set_trace() + s_self = self.as_blockcontext_get_shadow(space) s_self.init_stack_and_temps() s_self.reset_stack() s_self.push_all(stack) s_self.store_expected_argument_count(0) self.s_class = space.w_MethodContext.as_class_get_shadow(space) - def as_blockcontext_get_shadow(self): - self.shadow = shadow.BlockContextShadow(space, self) + def as_blockcontext_get_shadow(self, space): + if not self.shadow: + self.shadow = shadow.BlockContextShadow(space, self) return self.shadow def wrap(x): From noreply at buildbot.pypy.org Thu Mar 20 13:16:07 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 20 Mar 2014 13:16:07 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Updated strategies tests. Message-ID: <20140320121607.C12951C08F3@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r674:f5c765f96240 Date: 2014-03-20 13:14 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/f5c765f96240/ Log: Updated strategies tests. Fixed other tests. 
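The rewritten tests that follow mostly check one property: storing a value the current strategy cannot represent demotes the object to a more general strategy without losing the elements already stored (for example test_Tagging_to_List). A toy model of that behaviour, using made-up names rather than the real W_PointersObject and strategy singletons, could be:

    class ToyArray(object):
        # starts out storing unboxed ints; falls back to generic storage on mismatch
        def __init__(self, size):
            self.kind = 'tagging'
            self.ints = [None] * size    # None stands in for nil in this toy
            self.objects = None

        def store(self, index, value):
            if self.kind == 'tagging' and not isinstance(value, int):
                self.objects = list(self.ints)   # preserve existing contents
                self.ints = None
                self.kind = 'list'               # one-way generalization
            if self.kind == 'tagging':
                self.ints[index] = value
            else:
                self.objects[index] = value

    a = ToyArray(3)
    a.store(0, 12)                   # still 'tagging'
    a.store(1, 'not an int')         # demoted to 'list', element 0 survives
    assert a.kind == 'list' and a.objects[0] == 12

The actual tests drive the same transitions through space.wrap_int() and check_arr(), as the diff below shows.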
diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -757,16 +757,17 @@ def __init__(self, space, w_self=None, w_home=None, argcnt=0, initialip=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - contextsize = w_home.as_methodcontext_get_shadow(space).myblocksize() creating_w_self = w_self is None if creating_w_self: + contextsize = w_home.as_methodcontext_get_shadow(space).myblocksize() w_self = model.W_PointersObject(space, space.w_BlockContext, contextsize) ContextPartShadow.__init__(self, space, w_self) if creating_w_self: w_self.store_shadow(self) self.store_expected_argument_count(argcnt) self.store_initialip(initialip) - self.store_w_home(w_home) + if w_home: + self.store_w_home(w_home) self.store_pc(initialip) self.init_stack_and_temps() diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -14,11 +14,11 @@ class MockFrame(model.W_PointersObject): def __init__(self, stack): + self.space = space size = 6 + len(stack) + 6 self.strategy = strategies.ListStorageStrategy.singleton self.initialize_storage(space, size) self.store_all(space, [None] * 6 + stack + [space.w_nil] * 6) - import pdb; pdb.set_trace() s_self = self.as_blockcontext_get_shadow(space) s_self.init_stack_and_temps() s_self.reset_stack() diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -155,7 +155,7 @@ assert s_object.s_home() == s_object def assert_contains_nils(w_obj): - for i in range(w_obj.strategy.size_of(w_obj)): + for i in range(w_obj.basic_size()): assert model.w_nil == w_obj.strategy.fetch(i, space, w_obj) def test_attach_mc(): diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -7,10 +7,8 @@ # Fieldtypes have a separate test file space, interp = tools.setup_module(tools, filename='bootstrapped.image') - class_Array = space.classtable["w_Array"] - def arr(size): return model.W_PointersObject(space, class_Array, size) @@ -19,21 +17,16 @@ a.store(space, 0, arr(1)) return a -def dense_arr(size): +def tagging_arr(size): a = arr(size) a.store(space, 0, space.wrap_int(12)) return a -def dense_arr_odd(size): +def tagging_arr_odd(size): a = arr(size) a.store(space, 2, space.wrap_int(12)) return a -def sparse_arr(size): - a = dense_arr(size) - a.store(space, 2, space.wrap_int(20)) - return a - def check_arr(arr, expected): for i in range(arr.basic_size()): if expected[i] == w_nil: @@ -85,55 +78,27 @@ a.store(space, 1, arr(1)) assert a.basic_size() == 5 -# ====== Dense and Sparse *SmallInteger-StorageStrategy +# ====== Tagging SmallInteger StorageStrategy -def test_AllNil_to_Dense(): - a = dense_arr(5) - assert isinstance(a.strategy, strategies.DenseSmallIntegerStorageStrategy) +def test_AllNil_to_Int(): + a = tagging_arr(5) + assert isinstance(a.strategy, strategies.TaggingSmallIntegerStorageStrategy) check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) -def test_Dense_store(): - a = dense_arr(5) +def test_Tagging_store(): + a = tagging_arr(5) a.store(space, 1, space.wrap_int(20)) a.store(space, 2, space.wrap_int(20)) - assert isinstance(a.strategy, strategies.DenseSmallIntegerStorageStrategy) + assert isinstance(a.strategy, strategies.TaggingSmallIntegerStorageStrategy) check_arr(a, [12, 20, 20, w_nil, w_nil]) -def test_Dense_overwrite_middle(): - a = dense_arr(5) - a.store(space, 1, 
space.wrap_int(20)) - a.store(space, 2, space.wrap_int(20)) - a.store(space, 1, space.wrap_int(30)) - check_arr(a, [12, 30, 20, w_nil, w_nil]) - -def test_Dense_overwrite_first(): - a = dense_arr(5) - a.store(space, 1, space.wrap_int(20)) - a.store(space, 2, space.wrap_int(20)) - a.store(space, 0, space.wrap_int(30)) - check_arr(a, [30, 20, 20, w_nil, w_nil]) - -def test_Dense_overwrite_last(): - a = dense_arr(5) - a.store(space, 1, space.wrap_int(20)) - a.store(space, 2, space.wrap_int(20)) - a.store(space, 2, space.wrap_int(30)) - check_arr(a, [12, 20, 30, w_nil, w_nil]) - -def test_Dense_odd(): - a = dense_arr_odd(5) - assert isinstance(a.strategy, strategies.DenseSmallIntegerStorageStrategy) +def test_Tagging_store_nil_to_nil(): + a = tagging_arr_odd(5) + a.store(space, 1, w_nil) check_arr(a, [w_nil, w_nil, 12, w_nil, w_nil]) - -def test_Dense_odd_store(): - a = dense_arr_odd(5) - a.store(space, 1, space.wrap_int(20)) - a.store(space, 3, space.wrap_int(40)) - a.store(space, 4, space.wrap_int(30)) - check_arr(a, [w_nil, 20, 12, 40, 30]) - -def test_Dense_odd_overwrite(): - a = dense_arr_odd(5) + +def test_Tagging_delete(): + a = tagging_arr_odd(5) a.store(space, 1, space.wrap_int(1)) a.store(space, 3, space.wrap_int(2)) a.store(space, 2, space.wrap_int(100)) @@ -141,67 +106,13 @@ a.store(space, 3, space.wrap_int(300)) check_arr(a, [w_nil, 200, 100, 300, w_nil]) -def test_Dense_store_nil_to_nil(): - a = dense_arr_odd(5) - a.store(space, 1, w_nil) - check_arr(a, [w_nil, w_nil, 12, w_nil, w_nil]) - -def test_Dense_delete(): - a = dense_arr_odd(5) - a.store(space, 1, space.wrap_int(1)) - a.store(space, 3, space.wrap_int(2)) - a.store(space, 2, space.wrap_int(100)) - a.store(space, 1, space.wrap_int(200)) - a.store(space, 3, space.wrap_int(300)) - check_arr(a, [w_nil, 200, 100, 300, w_nil]) - -def test_Dense_delete_first(): - a = dense_arr_odd(5) +def test_Tagging_delete_first(): + a = tagging_arr_odd(5) a.store(space, 1, space.wrap_int(1)) a.store(space, 1, w_nil) check_arr(a, [w_nil, w_nil, 12, w_nil, w_nil]) -def test_Dense_delete_last(): - a = dense_arr_odd(5) - a.store(space, 1, space.wrap_int(1)) - a.store(space, 2, w_nil) - check_arr(a, [w_nil, 1, w_nil, w_nil, w_nil]) - -def test_Dense_to_AllNil(): - a = dense_arr_odd(5) - a.store(space, 2, w_nil) - assert isinstance(a.strategy, strategies.AllNilStorageStrategy) - -def test_Dense_to_List(): - a = dense_arr_odd(5) +def test_Tagging_to_List(): + a = tagging_arr_odd(5) a.store(space, 1, arr(1)) assert isinstance(a.strategy, strategies.ListStorageStrategy) - -def test_Dense_to_Sparse_by_deleting(): - a = dense_arr_odd(5) - a.store(space, 1, space.wrap_int(10)) - a.store(space, 3, space.wrap_int(20)) - a.store(space, 2, w_nil) - assert isinstance(a.strategy, strategies.SparseSmallIntegerStorageStrategy) - check_arr(a, [w_nil, 10, w_nil, 20, w_nil]) - -def test_Dense_to_Sparse_by_storing(): - a = dense_arr_odd(5) - a.store(space, 4, space.wrap_int(10)) - assert isinstance(a.strategy, strategies.SparseSmallIntegerStorageStrategy) - check_arr(a, [w_nil, w_nil, 12, w_nil, 10]) - -def test_Sparse_store_nil(): - a = sparse_arr(5) - a.store(space, 2, w_nil) - check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) - -def test_Sparse_store(): - a = sparse_arr(5) - a.store(space, 4, space.wrap_int(100)) - check_arr(a, [12, w_nil, 20, w_nil, 100]) - -def test_Sparse_to_List(): - a = sparse_arr(5) - a.store(space, 4, arr(5)) - assert isinstance(a.strategy, strategies.ListStorageStrategy) From noreply at buildbot.pypy.org Thu Mar 20 13:20:59 2014 From: 
noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 13:20:59 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Found out and explain why we get rare crashes in pypy if we Message-ID: <20140320122059.473081C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70112:3ada12cdea0e Date: 2014-03-20 13:20 +0100 http://bitbucket.org/pypy/pypy/changeset/3ada12cdea0e/ Log: Found out and explain why we get rare crashes in pypy if we enable this 'stm_ignored'. diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -377,14 +377,18 @@ # special non-computed-yet value. if not s: return 0 - with stm_ignored: - x = s.hash + #with stm_ignored: + x = s.hash if x == 0: x = _hash_string(s.chars) if x == 0: x = 29872897 - with stm_ignored: - s.hash = x + # XXX STM note: we would like this write to be stm-ignored, + # but we can't, because ll_strfasthash() might later miss + # the written value and return 0 again (rarely). Think + # again later about the best option. + #with stm_ignored: + s.hash = x return x def ll_length(s): From noreply at buildbot.pypy.org Thu Mar 20 15:37:49 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 20 Mar 2014 15:37:49 +0100 (CET) Subject: [pypy-commit] pypy improve-consecutive-dict-lookups: close to be merged branch Message-ID: <20140320143749.1B0F21C1578@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: improve-consecutive-dict-lookups Changeset: r70113:f0136fc09640 Date: 2014-03-20 16:36 +0200 http://bitbucket.org/pypy/pypy/changeset/f0136fc09640/ Log: close to be merged branch diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3736,7 +3736,7 @@ assert False, 'should not be called' from rpython.jit.codewriter.effectinfo import EffectInfo - effectinfo = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, EffectInfo.OS_MATH_SQRT) + effectinfo = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, EffectInfo.OS_MATH_SQRT) FPTR = self.Ptr(self.FuncType([lltype.Float], lltype.Float)) func_ptr = llhelper(FPTR, math_sqrt) FUNC = deref(FPTR) From noreply at buildbot.pypy.org Thu Mar 20 15:37:51 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 20 Mar 2014 15:37:51 +0100 (CET) Subject: [pypy-commit] pypy default: Merge "improve-consecutive-dict-lookups". This branch makes it possible Message-ID: <20140320143751.1E8531C1578@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70114:0679ba1c1143 Date: 2014-03-20 16:37 +0200 http://bitbucket.org/pypy/pypy/changeset/0679ba1c1143/ Log: Merge "improve-consecutive-dict-lookups". 
This branch makes it possible to cache consecutive dict lookups (for both normal and ordered dicts) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3736,7 +3736,7 @@ assert False, 'should not be called' from rpython.jit.codewriter.effectinfo import EffectInfo - effectinfo = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, EffectInfo.OS_MATH_SQRT) + effectinfo = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, EffectInfo.OS_MATH_SQRT) FPTR = self.Ptr(self.FuncType([lltype.Float], lltype.Float)) func_ptr = llhelper(FPTR, math_sqrt) FUNC = deref(FPTR) diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -178,7 +178,7 @@ return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescr=None): """Return the calldescr that describes all calls done by 'op'. This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. It gets an effectinfo @@ -259,6 +259,7 @@ effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, extraeffect, oopspecindex, can_invalidate, call_release_gil_target, + extradescr, ) # assert effectinfo is not None diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -21,6 +21,7 @@ OS_ARRAYCOPY = 1 # "list.ll_arraycopy" OS_STR2UNICODE = 2 # "str.str2unicode" OS_SHRINK_ARRAY = 3 # rgc.ll_shrink_array + OS_DICT_LOOKUP = 4 # ll_dict_lookup # OS_STR_CONCAT = 22 # "stroruni.concat" OS_STR_SLICE = 23 # "stroruni.slice" @@ -88,15 +89,18 @@ # for debugging: _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, - OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, + OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, OS_DICT_LOOKUP, ]) def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, + readonly_descrs_interiorfields, write_descrs_fields, write_descrs_arrays, + write_descrs_interiorfields, extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extradescrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), @@ -121,17 +125,21 @@ result = object.__new__(cls) result.readonly_descrs_fields = readonly_descrs_fields result.readonly_descrs_arrays = readonly_descrs_arrays + result.readonly_descrs_interiorfields = readonly_descrs_interiorfields if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: result.write_descrs_fields = [] result.write_descrs_arrays = [] + result.write_descrs_interiorfields = [] else: result.write_descrs_fields = write_descrs_fields result.write_descrs_arrays = write_descrs_arrays + result.write_descrs_interiorfields = write_descrs_interiorfields result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex + result.extradescrs = extradescrs result.call_release_gil_target = call_release_gil_target if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE @@ -163,7 +171,7 @@ 
return None return frozenset(x) -EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, +EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) @@ -172,19 +180,24 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extradescr=None): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None readonly_descrs_arrays = None + readonly_descrs_interiorfields = None write_descrs_fields = None write_descrs_arrays = None + write_descrs_interiorfields = None extraeffect = EffectInfo.EF_RANDOM_EFFECTS else: readonly_descrs_fields = [] readonly_descrs_arrays = [] + readonly_descrs_interiorfields = [] write_descrs_fields = [] write_descrs_arrays = [] + write_descrs_interiorfields = [] def add_struct(descrs_fields, (_, T, fieldname)): T = deref(T) @@ -198,6 +211,17 @@ descr = cpu.arraydescrof(ARRAY) descrs_arrays.append(descr) + def add_interiorfield(descrs_interiorfields, (_, T, fieldname)): + T = deref(T) + if not isinstance(T, lltype.Array): + return # let's not consider structs for now + if not consider_array(T): + return + if getattr(T.OF, fieldname) is lltype.Void: + return + descr = cpu.interiorfielddescrof(T, fieldname) + descrs_interiorfields.append(descr) + for tup in effects: if tup[0] == "struct": add_struct(write_descrs_fields, tup) @@ -205,6 +229,12 @@ tupw = ("struct",) + tup[1:] if tupw not in effects: add_struct(readonly_descrs_fields, tup) + elif tup[0] == "interiorfield": + add_interiorfield(write_descrs_interiorfields, tup) + elif tup[0] == "readinteriorfield": + tupw = ('interiorfield',) + tup[1:] + if tupw not in effects: + add_interiorfield(readonly_descrs_interiorfields, tup) elif tup[0] == "array": add_array(write_descrs_arrays, tup) elif tup[0] == "readarray": @@ -216,12 +246,15 @@ # return EffectInfo(readonly_descrs_fields, readonly_descrs_arrays, + readonly_descrs_interiorfields, write_descrs_fields, write_descrs_arrays, + write_descrs_interiorfields, extraeffect, oopspecindex, can_invalidate, - call_release_gil_target) + call_release_gil_target, + extradescr) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -403,6 +403,9 @@ prepare = self._handle_math_sqrt_call elif oopspec_name.startswith('rgc.'): prepare = self._handle_rgc_call + elif oopspec_name.endswith('dict.lookup'): + # also ordereddict.lookup + prepare = self._handle_dict_lookup_call else: prepare = self.prepare_builtin_call try: @@ -1680,9 +1683,11 @@ # ---------- # Strings and Unicodes. 
- def _handle_oopspec_call(self, op, args, oopspecindex, extraeffect=None): + def _handle_oopspec_call(self, op, args, oopspecindex, extraeffect=None, + extradescr=None): calldescr = self.callcontrol.getcalldescr(op, oopspecindex, - extraeffect) + extraeffect, + extradescr=extradescr) if extraeffect is not None: assert (is_test_calldescr(calldescr) # for tests or calldescr.get_extra_info().extraeffect == extraeffect) @@ -1846,6 +1851,14 @@ return self._handle_oopspec_call(op, args, EffectInfo.OS_MATH_SQRT, EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + def _handle_dict_lookup_call(self, op, oopspec_name, args): + extradescr1 = self.cpu.fielddescrof(op.args[1].concretetype.TO, + 'entries') + extradescr2 = self.cpu.interiorfielddescrof( + op.args[1].concretetype.TO.entries.TO, 'key') + return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, + extradescr=[extradescr1, extradescr2]) + def _handle_rgc_call(self, op, oopspec_name, args): if oopspec_name == 'rgc.ll_shrink_array': return self._handle_oopspec_call(op, args, EffectInfo.OS_SHRINK_ARRAY, EffectInfo.EF_CAN_RAISE) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -60,7 +60,8 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, + extradescr=None): return 'calldescr' def calldescr_canraise(self, calldescr): return True @@ -117,7 +118,8 @@ self.callinfocollection = FakeCallInfoCollection() def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, + extradescr=None): assert oopspecindex is not None # in this test EI = effectinfo.EffectInfo if oopspecindex != EI.OS_ARRAYCOPY: diff --git a/rpython/jit/codewriter/test/test_list.py b/rpython/jit/codewriter/test/test_list.py --- a/rpython/jit/codewriter/test/test_list.py +++ b/rpython/jit/codewriter/test/test_list.py @@ -37,7 +37,8 @@ class FakeCallControl: class getcalldescr(AbstractDescr): - def __init__(self, op, oopspecindex=0, extraeffect=None): + def __init__(self, op, oopspecindex=0, extraeffect=None, + extradescr=None): self.op = op self.oopspecindex = oopspecindex def __repr__(self): diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -1,8 +1,10 @@ import os +from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException -from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS +from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.objectmodel import we_are_translated @@ -173,6 +175,10 @@ self.cached_fields = {} # cached array items: {array descr: {index: CachedField}} self.cached_arrayitems = {} + # cached dict items: {dict descr: {(optval, index): box-or-const}} + 
self.cached_dict_reads = {} + # cache of corresponding array descrs + self.corresponding_array_descrs = {} # self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False @@ -180,9 +186,13 @@ self.postponed_op = None def force_at_end_of_preamble(self): + self.cached_dict_reads.clear() + self.corresponding_array_descrs.clear() self.force_all_lazy_setfields_and_arrayitems() def flush(self): + self.cached_dict_reads.clear() + self.corresponding_array_descrs.clear() self.force_all_lazy_setfields_and_arrayitems() self.emit_postponed_op() @@ -214,6 +224,7 @@ del self._lazy_setfields_and_arrayitems[:] self.cached_fields.clear() self.cached_arrayitems.clear() + self.cached_dict_reads.clear() def field_cache(self, descr): try: @@ -282,6 +293,44 @@ self.force_all_lazy_setfields_and_arrayitems() self.clean_caches() + def optimize_CALL(self, op): + # dispatch based on 'oopspecindex' to a method that handles + # specifically the given oopspec call. For non-oopspec calls, + # oopspecindex is just zero. + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_DICT_LOOKUP: + if self._optimize_CALL_DICT_LOOKUP(op): + return + self.emit_operation(op) + + def _optimize_CALL_DICT_LOOKUP(self, op): + descrs = op.getdescr().get_extra_info().extradescrs + descr1 = descrs[0] + descr2 = descrs[1] + if descr1 in self.cached_dict_reads: + d = self.cached_dict_reads[descr1] + else: + d = self.cached_dict_reads[descr1] = args_dict() + self.corresponding_array_descrs[descr2] = descr1 + args = self.optimizer.make_args_key(op) + try: + res_v = d[args] + except KeyError: + d[args] = self.getvalue(op.result) + return False + else: + self.make_equal_to(op.result, res_v) + self.last_emitted_operation = REMOVED + return True + + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.last_emitted_operation is REMOVED: + return + self.emit_operation(op) + + optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION + def force_from_effectinfo(self, effectinfo): # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large @@ -290,9 +339,20 @@ for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: + try: + del self.cached_dict_reads[fielddescr] + except KeyError: + pass self.force_lazy_setfield(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) + for descr in effectinfo.write_descrs_interiorfields: + if descr in self.corresponding_array_descrs: + dictdescr = self.corresponding_array_descrs.pop(descr) + try: + del self.cached_dict_reads[dictdescr] + except KeyError: + pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info self.force_lazy_setfield(vrefinfo.descr_forced) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5444,6 +5444,21 @@ """ self.optimize_loop(ops, expected) + def test_consecutive_getinteriorfields(self): + py.test.skip("we want this to pass") + ops = """ + [p0, i0] + i1 = getinteriorfield_gc(p0, i0, descr=valuedescr) + i2 = getinteriorfield_gc(p0, i0, descr=valuedescr) + jump(i1, i2) + """ + expected = """ + [p0, i0] + i1 
= getinteriorfield_gc(p0, i0, descr=valuedescr) + jump(i1, i1) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -181,28 +181,29 @@ plaincalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [])) + EffectInfo([], [], [], [], [], [])) writeadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [])) + EffectInfo([], [], [], [adescr], [], [])) writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [arraydescr])) + EffectInfo([], [], [], [adescr], [arraydescr], + [])) readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([adescr], [], [], [])) + EffectInfo([adescr], [], [], [], [], [])) mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([nextdescr], [], [], [], + EffectInfo([nextdescr], [], [], [], [], [], EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE, can_invalidate=True)) arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [arraydescr], [], [arraydescr], + EffectInfo([], [arraydescr], [], [], [arraydescr], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_ARRAYCOPY)) raw_malloc_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR)) raw_free_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_RAW_FREE)) @@ -251,17 +252,18 @@ _oopspecindex = getattr(EffectInfo, _os) locals()[_name] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=_oopspecindex)) # _oopspecindex = getattr(EffectInfo, _os.replace('STR', 'UNI')) locals()[_name.replace('str', 'unicode')] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=_oopspecindex)) s2u_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], oopspecindex=EffectInfo.OS_STR2UNICODE)) + EffectInfo([], [], [], [], [], [], + oopspecindex=EffectInfo.OS_STR2UNICODE)) # class LoopToken(AbstractDescr): @@ -277,7 +279,7 @@ virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced FUNC = lltype.FuncType([], lltype.Void) - ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, can_invalidate=False, oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -193,6 +193,107 @@ self.check_simple_loop({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, 'jump': 1}) + def test_dict_two_lookups(self): + driver = 
JitDriver(greens = [], reds = 'auto') + d = {'a': 3, 'b': 4} + indexes = ['a', 'b'] + + def f(n): + s = 0 + while n > 0: + driver.jit_merge_point() + s += d[indexes[n & 1]] + s += d[indexes[n & 1]] + n -= 1 + return s + + self.meta_interp(f, [10]) + # XXX should be one getinteriorfield_gc + self.check_simple_loop(call=1, getinteriorfield_gc=2, + guard_no_exception=1) + + def test_ordered_dict_two_lookups(self): + driver = JitDriver(greens = [], reds = 'auto') + d = OrderedDict() + d['a'] = 3 + d['b'] = 4 + indexes = ['a', 'b'] + + def f(n): + s = 0 + while n > 0: + driver.jit_merge_point() + s += d[indexes[n & 1]] + s += d[indexes[n & 1]] + n -= 1 + return s + + self.meta_interp(f, [10]) + # XXX should be one getinteriorfield_gc + self.check_simple_loop(call=1, getinteriorfield_gc=2, + guard_no_exception=1) + + def test_dict_insert_invalidates_caches(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + d['aa'] += 1 # this will invalidate the index + s += d[index] + n -= 1 + return s + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_simple_loop(call=5) + + def test_dict_array_write_invalidates_caches(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + del d['cc'] + s += d[index] + d['cc'] = 3 + n -= 1 + return s + + exp = f(10) + res = self.meta_interp(f, [10]) + assert res == exp + self.check_simple_loop(call=7) + + def test_dict_double_lookup_2(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + d[index] += 1 + n -= 1 + return s + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_simple_loop(call=3) + class TestLLtype(DictTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -302,7 +302,7 @@ self.clear_vable_ptr = self.warmrunnerdesc.helper_func( FUNCPTR, self.clear_vable_token) FUNC = FUNCPTR.TO - ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, can_invalidate=False, oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -569,6 +569,7 @@ PERTURB_SHIFT = 5 @jit.look_inside_iff(lambda d, key, hash: jit.isvirtual(d) and jit.isconstant(key)) + at jit.oopspec('dict.lookup(d, key, hash)') def ll_dict_lookup(d, key, hash): entries = d.entries ENTRIES = lltype.typeOf(entries).TO diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -709,6 +709,7 @@ @jit.look_inside_iff(lambda d, key, hash, store_flag, T: jit.isvirtual(d) and jit.isconstant(key)) + at jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)') def ll_dict_lookup(d, key, hash, store_flag, T): INDEXES = 
_ll_ptr_to_array_of(T) entries = d.entries diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -353,3 +353,23 @@ result = wa.analyze(fgraph.startblock.operations[-1]) assert list(result) == [("struct", lltype.Ptr(S), "x")] + + def test_interiorfield(self): + A = lltype.GcArray(lltype.Struct('x', ('x', lltype.Signed), + ('y', lltype.Signed))) + + def g(x): + a = lltype.malloc(A, 1) + a[0].y = 3 + return f(a, x) + + def f(a, x): + a[0].x = x + return a[0].y + + t, wa = self.translate(g, [int]) + ggraph = graphof(t, g) + result = wa.analyze(ggraph.startblock.operations[-1]) + res = list(result) + assert ('readinteriorfield', lltype.Ptr(A), 'y') in res + assert ('interiorfield', lltype.Ptr(A), 'x') in res diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import Variable +from rpython.flowspace.model import Variable, Constant from rpython.translator.backendopt import graphanalyze top_set = object() @@ -37,6 +37,12 @@ return top_set return result1.union(result2) + def _getinteriorname(self, op): + if (isinstance(op.args[1], Constant) and + isinstance(op.args[1].value, str)): + return op.args[1].value + return op.args[2].value + def analyze_simple_operation(self, op, graphinfo): if op.opname == "setfield": if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): @@ -45,11 +51,18 @@ elif op.opname == "setarrayitem": if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): return self._array_result(op.args[0].concretetype) + elif op.opname == "setinteriorfield": + if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): + name = self._getinteriorname(op) + return self._interiorfield_result(op.args[0].concretetype, name) return empty_set def _array_result(self, TYPE): return frozenset([("array", TYPE)]) + def _interiorfield_result(self, TYPE, fieldname): + return frozenset([("interiorfield", TYPE, fieldname)]) + def compute_graph_info(self, graph): return FreshMallocs(graph) @@ -99,4 +112,8 @@ elif op.opname == "getarrayitem": return frozenset([ ("readarray", op.args[0].concretetype)]) + elif op.opname == "getinteriorfield": + name = self._getinteriorname(op) + return frozenset([("readinteriorfield", op.args[0].concretetype, + name)]) return WriteAnalyzer.analyze_simple_operation(self, op, graphinfo) From noreply at buildbot.pypy.org Thu Mar 20 15:51:44 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 20 Mar 2014 15:51:44 +0100 (CET) Subject: [pypy-commit] pypy default: Remove trailing whitespace introduced by the merge of improve-consecutive-dict-lookups. Message-ID: <20140320145144.21C021C1578@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r70115:4c6239cc82ff Date: 2014-03-20 15:50 +0100 http://bitbucket.org/pypy/pypy/changeset/4c6239cc82ff/ Log: Remove trailing whitespace introduced by the merge of improve- consecutive-dict-lookups. 
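For reference, the pattern that the just-merged improve-consecutive-dict-lookups branch is aimed at can be sketched roughly as follows. This is only an illustration modelled on test_dict_two_lookups and test_dict_double_lookup_2 from the merge above; the names driver, d, indexes and f are simply the ones used in those tests, nothing below is new code in the repository:

    from rpython.rlib.jit import JitDriver

    driver = JitDriver(greens=[], reds='auto')
    d = {'a': 3, 'b': 4}
    indexes = ['a', 'b']

    def f(n):
        s = 0
        while n > 0:
            driver.jit_merge_point()
            key = indexes[n & 1]
            s += d[key]   # first lookup: residual CALL to ll_dict_lookup
            s += d[key]   # same key again: with this branch the second CALL
                          # is dropped and the cached result is reused
            n -= 1
        return s

With this branch, OptHeap remembers the result of the ll_dict_lookup call (keyed on the call arguments) and removes the duplicate, so the traced loop keeps a single residual call. The cache is flushed whenever the dict's fields or its 'entries' interior fields are written, which is what the new write_descrs_interiorfields information in EffectInfo is for.
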
diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -362,7 +362,7 @@ a = lltype.malloc(A, 1) a[0].y = 3 return f(a, x) - + def f(a, x): a[0].x = x return a[0].y From noreply at buildbot.pypy.org Thu Mar 20 15:56:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 15:56:55 +0100 (CET) Subject: [pypy-commit] pypy default: Kill some tests that are broken (they write a word at a random location in Message-ID: <20140320145655.914021D2864@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70116:23f63b53184e Date: 2014-03-20 15:55 +0100 http://bitbucket.org/pypy/pypy/changeset/23f63b53184e/ Log: Kill some tests that are broken (they write a word at a random location in memory). Their usage is unclear. diff --git a/rpython/jit/backend/x86/test/test_assembler.py b/rpython/jit/backend/x86/test/test_assembler.py --- a/rpython/jit/backend/x86/test/test_assembler.py +++ b/rpython/jit/backend/x86/test/test_assembler.py @@ -55,9 +55,7 @@ asm = cpu.assembler asm.setup_once() asm.setup(looptoken) - self.fm = X86FrameManager(0) - self.xrm = X86XMMRegisterManager(None, frame_manager=self.fm, - assembler=asm) + self.xrm = X86XMMRegisterManager(None, assembler=asm) callback(asm) asm.mc.RET() rawstart = asm.materialize_loop(looptoken) @@ -75,29 +73,6 @@ res = self.do_test(callback) assert res == 42 - def test_push_stack(self): - def callback(asm): - loc = self.fm.frame_pos(5, INT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(imm(42), loc) - asm.regalloc_push(loc) - asm.regalloc_pop(eax) - asm.mc.ADD_ri(esp.value, 64) - res = self.do_test(callback) - assert res == 42 - - def test_pop_stack(self): - def callback(asm): - loc = self.fm.frame_pos(5, INT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(imm(42), edx) - asm.regalloc_push(edx) - asm.regalloc_pop(loc) - asm.mov(loc, eax) - asm.mc.ADD_ri(esp.value, 64) - res = self.do_test(callback) - assert res == 42 - def test_simple_xmm(self): def callback(asm): c = ConstFloat(longlong.getfloatstorage(-42.5)) @@ -108,33 +83,3 @@ asm.mc.CVTTSD2SI(eax, xmm0) res = self.do_test(callback) assert res == -42 - - def test_push_stack_xmm(self): - def callback(asm): - c = ConstFloat(longlong.getfloatstorage(-42.5)) - loc = self.xrm.convert_to_imm(c) - loc2 = self.fm.frame_pos(4, FLOAT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(loc, xmm5) - asm.mov(xmm5, loc2) - asm.regalloc_push(loc2) - asm.regalloc_pop(xmm0) - asm.mc.ADD_ri(esp.value, 64) - asm.mc.CVTTSD2SI(eax, xmm0) - res = self.do_test(callback) - assert res == -42 - - def test_pop_stack_xmm(self): - def callback(asm): - c = ConstFloat(longlong.getfloatstorage(-42.5)) - loc = self.xrm.convert_to_imm(c) - loc2 = self.fm.frame_pos(4, FLOAT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(loc, xmm5) - asm.regalloc_push(xmm5) - asm.regalloc_pop(loc2) - asm.mov(loc2, xmm0) - asm.mc.ADD_ri(esp.value, 64) - asm.mc.CVTTSD2SI(eax, xmm0) - res = self.do_test(callback) - assert res == -42 From noreply at buildbot.pypy.org Thu Mar 20 16:00:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 16:00:34 +0100 (CET) Subject: [pypy-commit] pypy default: A test specifically for 76b06820d08b. 
Message-ID: <20140320150034.6E04C1D2888@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70117:e30ec8f6a495 Date: 2014-03-20 15:59 +0100 http://bitbucket.org/pypy/pypy/changeset/e30ec8f6a495/ Log: A test specifically for 76b06820d08b. diff --git a/rpython/jit/backend/x86/test/test_assembler.py b/rpython/jit/backend/x86/test/test_assembler.py --- a/rpython/jit/backend/x86/test/test_assembler.py +++ b/rpython/jit/backend/x86/test/test_assembler.py @@ -83,3 +83,9 @@ asm.mc.CVTTSD2SI(eax, xmm0) res = self.do_test(callback) assert res == -42 + + def test_xmm_pushes_8_bytes(self): + def callback(asm): + asm.regalloc_push(xmm5) + asm.mc.ADD(esp, imm(8)) + self.do_test(callback) From noreply at buildbot.pypy.org Thu Mar 20 16:23:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 16:23:17 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140320152317.4250F1D2902@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70118:19982e263d93 Date: 2014-03-20 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/19982e263d93/ Log: in-progress diff --git a/rpython/jit/backend/x86/stmtlocal.py b/rpython/jit/backend/x86/stmtlocal.py --- a/rpython/jit/backend/x86/stmtlocal.py +++ b/rpython/jit/backend/x86/stmtlocal.py @@ -11,7 +11,7 @@ eci = ExternalCompilationInfo(post_include_bits=[''' static long pypy__threadlocal_base(void) { - /* XXX ONLY LINUX WITH GCC FOR NOW XXX */ + /* XXX ONLY LINUX WITH GCC/CLANG FOR NOW XXX */ long result; asm("%s" : "=r"(result)); return result; @@ -23,8 +23,7 @@ 'pypy__threadlocal_base', [], lltype.Signed, compilation_info=eci, - threadsafe=False, - transactionsafe=True) + _nowrapper=True) def tl_segment_prefix(mc): @@ -47,9 +46,3 @@ 'stm_invalidate_jmp_buf', [llmemory.Address], lltype.Void, sandboxsafe=True, _nowrapper=True, transactionsafe=True) -stm_pointer_equal_fn = rffi.llexternal( - 'stm_pointer_equal', - [llmemory.Address, llmemory.Address], lltype.Bool, - sandboxsafe=True, _nowrapper=True, transactionsafe=True) - - diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -502,9 +502,6 @@ rd_virtuals = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) - rd_stm_busy = False # same as CNT_BUSY_FLAG, in a different field, - # only for stm - status = r_uint(0) ST_BUSY_FLAG = 0x01 # if set, busy tracing from the guard @@ -573,30 +570,6 @@ _trace_and_compile_from_bridge._dont_inline_ = True def must_compile(self, deadframe, metainterp_sd, jitdriver_sd): - if rgc.stm_is_enabled(): - method = self.must_compile_stm - else: - method = self.must_compile_nonstm - return method(deadframe, metainterp_sd, jitdriver_sd) - - def must_compile_stm(self, deadframe, metainterp_sd, jitdriver_sd): - XXX # fix me - trace_eagerness = jitdriver_sd.warmstate.trace_eagerness - with stm_ignored: - approx_counter = self._counter + 1 - self._counter = approx_counter - - # The call to guard_already_patched is necessary because it is - # possible that the current transaction didn't see the - # patched JMP yet, but already sees rd_stm_busy as False (because - # the patching is in raw-memory). - # Thus it may try to compile a trace too and also patch the assembler. - # However, this would trigger the assertion in - # x86.assembler.patch_jump_for_descr. 
- return (approx_counter >= trace_eagerness and not self.rd_stm_busy - and not metainterp_sd.cpu.guard_already_patched(self)) - - def must_compile_nonstm(self, deadframe, metainterp_sd, jitdriver_sd): jitcounter = metainterp_sd.warmrunnerdesc.jitcounter # if self.status & (self.ST_BUSY_FLAG | self.ST_TYPE_MASK) == 0: @@ -638,23 +611,27 @@ intval * 1442968193) # increment = jitdriver_sd.warmstate.increment_trace_eagerness - return jitcounter.tick(hash, increment) + result = jitcounter.tick(hash, increment) + if rgc.stm_is_enabled(): + # The call to guard_already_patched is necessary because it is + # possible that the current transaction didn't see the + # patched JMP yet, but already sees the ST_BUSY_FLAG as 0 (because + # the patching is in raw-memory). + # Thus it may try to compile a trace too and also patch the assembler. + # However, this would trigger the assertion in + # x86.assembler.patch_jump_for_descr. + result = result and not metainterp_sd.cpu.guard_already_patched(self) + return result def start_compiling(self): # start tracing and compiling from this guard. - if rgc.stm_is_enabled(): - self.rd_stm_busy = True - else: - self.status |= self.ST_BUSY_FLAG + self.status |= self.ST_BUSY_FLAG def done_compiling(self): # done tracing and compiling from this guard. Note that if the # bridge has not been successfully compiled, the jitcounter for # it was reset to 0 already by jitcounter.tick() and not # incremented at all as long as ST_BUSY_FLAG was set. - if rgc.stm_is_enabled(): - XXX # review - self.rd_stm_busy = False self.status &= ~self.ST_BUSY_FLAG def compile_and_attach(self, metainterp, new_loop): diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -10,7 +10,10 @@ # keep in sync with the C code in pypy__decay_jit_counters ENTRY = lltype.Struct('timetable_entry', ('times', lltype.FixedSizeArray(rffi.FLOAT, 5)), - ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5))) + ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5)), + hints={'stm_dont_track_raw_accesses': True}) +ENTRY_ARRAY = lltype.Array(ENTRY, hints={'nolength': True, + 'stm_dont_track_raw_accesses': True}) class JitCounter: @@ -29,7 +32,7 @@ # and we're getting a 32-bytes-long entry; then this entry # contains 5 possible ways, each occupying 6 bytes: 4 bytes # for a float, and the 2 lowest bytes from the original hash. 
- self.timetable = lltype.malloc(rffi.CArray(ENTRY), self.size, + self.timetable = lltype.malloc(ENTRY_ARRAY, self.size, flavor='raw', zero=True, track_allocation=False) self._nexthash = r_uint(0) @@ -205,7 +208,8 @@ pypy__decay_jit_counters = rffi.llexternal( "pypy__decay_jit_counters", [rffi.CCHARP, lltype.Float, lltype.Signed], - lltype.Void, compilation_info=eci, _nowrapper=True, sandboxsafe=True) + lltype.Void, compilation_info=eci, _nowrapper=True, sandboxsafe=True, + transactionsafe=True) # ____________________________________________________________ diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -35,7 +35,7 @@ def build_opt_chain(metainterp_sd, enable_opts): optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict - if config.translation.stm: + if metainterp_sd.config.translation.stm: optimizations.append(OptSTM()) for name, opt in unroll_all_opts: diff --git a/rpython/jit/metainterp/test/test_warmstate.py b/rpython/jit/metainterp/test/test_warmstate.py --- a/rpython/jit/metainterp/test/test_warmstate.py +++ b/rpython/jit/metainterp/test/test_warmstate.py @@ -4,7 +4,6 @@ from rpython.jit.metainterp.warmstate import wrap, unwrap, specialize_value from rpython.jit.metainterp.warmstate import equal_whatever, hash_whatever from rpython.jit.metainterp.warmstate import WarmEnterState -from rpython.jit.metainterp.warmstate import MODE_HAVE_PROC, MODE_TRACING from rpython.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from rpython.jit.metainterp.counter import DeterministicJitCounter diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -1,7 +1,3 @@ -XXX -this file here is from default. It's missing some changes related to stm -that were conflicting (see the stmgc-c4 branch). We need to review carefully... -XXX import sys import weakref From noreply at buildbot.pypy.org Thu Mar 20 16:50:15 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 20 Mar 2014 16:50:15 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_whatsnew Message-ID: <20140320155015.B4CDF1C1413@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70119:ae422bbeeb59 Date: 2014-03-20 17:49 +0200 http://bitbucket.org/pypy/pypy/changeset/ae422bbeeb59/ Log: fix test_whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -114,3 +114,6 @@ app-level. The `Buffer` class is now used by `W_MemoryView` and `W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was an alias to `Buffer`, which was wrappable itself. + +.. 
branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain From noreply at buildbot.pypy.org Thu Mar 20 16:54:26 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 20 Mar 2014 16:54:26 +0100 (CET) Subject: [pypy-commit] pypy default: mention dict differencies Message-ID: <20140320155426.A25EA1C02AF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70120:eb1389e607fa Date: 2014-03-20 17:53 +0200 http://bitbucket.org/pypy/pypy/changeset/eb1389e607fa/ Log: mention dict differencies diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -292,6 +292,10 @@ depending on the compiler settings, the default of 768KB is enough for about 1400 calls. +* since the implementation of dictionary is different, the exact number + which ``__hash__`` and ``__eq__`` are called is different. Since CPython + does not give any specific guarantees either, don't rely on it. + * assignment to ``__class__`` is limited to the cases where it works on CPython 2.5. On CPython 2.6 and 2.7 it works in a bit more cases, which are not supported by PyPy so far. (If needed, From noreply at buildbot.pypy.org Thu Mar 20 16:58:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 16:58:15 +0100 (CET) Subject: [pypy-commit] pypy default: Try to settle on this order of arguments, which is the same one as the Message-ID: <20140320155815.E78551C1413@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70121:43b3b5bb76fa Date: 2014-03-20 16:57 +0100 http://bitbucket.org/pypy/pypy/changeset/43b3b5bb76fa/ Log: Try to settle on this order of arguments, which is the same one as the one on compile_loop / compile_bridge. 
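Concretely, the settled order means the x86 runner now forwards its positional arguments unchanged to the assembler. As a simplified sketch of the two methods as they read after the patch below (taken from the runner.py hunk, nothing new added here):

    def compile_loop(self, inputargs, operations, looptoken, log=True,
                     name='', logger=None):
        return self.assembler.assemble_loop(inputargs, operations, looptoken,
                                            log, name, logger)

    def compile_bridge(self, faildescr, inputargs, operations,
                       original_loop_token, log=True, logger=None):
        clt = original_loop_token.compiled_loop_token
        clt.compiling_a_bridge()
        return self.assembler.assemble_bridge(faildescr, inputargs, operations,
                                              original_loop_token, log, logger)
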
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -434,8 +434,8 @@ self.wb_slowpath[withcards + 2 * withfloats] = rawstart @rgc.no_release_gil - def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, - log): + def assemble_loop(self, inputargs, operations, looptoken, log, + loopname, logger): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -514,8 +514,8 @@ size_excluding_failure_stuff - looppos) @rgc.no_release_gil - def assemble_bridge(self, logger, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, faildescr, inputargs, operations, + original_loop_token, log, logger): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -93,16 +93,15 @@ def compile_loop(self, inputargs, operations, looptoken, log=True, name='', logger=None): - return self.assembler.assemble_loop(logger, name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(inputargs, operations, looptoken, log, + name, logger) def compile_bridge(self, faildescr, inputargs, operations, original_loop_token, log=True, logger=None): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(logger, faildescr, inputargs, - operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(faildescr, inputargs, operations, + original_loop_token, log, logger) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem From noreply at buildbot.pypy.org Thu Mar 20 18:27:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 18:27:02 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140320172702.AAD2A1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70122:a82acece79d2 Date: 2014-03-20 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/a82acece79d2/ Log: in-progress diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -685,8 +685,8 @@ @rgc.no_release_gil - def assemble_loop(self, loopname, inputargs, operations, looptoken, log, - logger=None): + def assemble_loop(self, inputargs, operations, looptoken, log, + loopname, logger): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -767,7 +767,7 @@ @rgc.no_release_gil def assemble_bridge(self, faildescr, inputargs, operations, - original_loop_token, log, logger=None): + original_loop_token, log, logger): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -11,7 +11,7 @@ RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op) from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.arch import 
(WORD, JITFRAME_FIXED_SIZE, IS_X86_32, - IS_X86_64, FRAME_FIXED_SIZE) + IS_X86_64) from rpython.jit.backend.x86.jump import remap_frame_layout_mixed from rpython.jit.backend.x86.regloc import (FrameLoc, RegLoc, ConstFloatLoc, FloatImmedLoc, ImmedLoc, imm, imm0, imm1, ecx, eax, edx, ebx, esi, edi, @@ -26,7 +26,7 @@ from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_uint -from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper.lltypesystem import lltype, rffi, rstr from rpython.rtyper.lltypesystem.lloperation import llop @@ -45,11 +45,9 @@ if isinstance(c, ConstInt): return imm(c.value) elif isinstance(c, ConstPtr): - # if we_are_translated() and c.value and rgc.can_move(c.value): - # not_implemented("convert_to_imm: ConstPtr needs special care") - if c.value and not c.imm_value: + if we_are_translated() and c.value and rgc.can_move(c.value): not_implemented("convert_to_imm: ConstPtr needs special care") - return imm(c.get_imm_value()) + return imm(rffi.cast(lltype.Signed, c.value)) else: not_implemented("convert_to_imm: got a %s" % c) diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -94,16 +94,15 @@ def compile_loop(self, inputargs, operations, looptoken, log=True, name='', logger=None): - return self.assembler.assemble_loop(logger, name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(inputargs, operations, looptoken, log, + name, logger) def compile_bridge(self, faildescr, inputargs, operations, original_loop_token, log=True, logger=None): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(logger, faildescr, inputargs, - operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(faildescr, inputargs, operations, + original_loop_token, log, logger) def guard_already_patched(self, faildescr): # only needed for STM so far diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1,7 +1,7 @@ import weakref from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import cast_instance_to_gcref -from rpython.rlib.objectmodel import we_are_translated, stm_ignored +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_start, debug_stop, debug_print from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib import rstack, rgc @@ -394,8 +394,8 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = do_compile_bridge(metainterp_sd, faildescr, - inputargs, operations, + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, + operations, original_loop_token) finally: debug_stop("jit-backend") @@ -526,8 +526,6 @@ self.status = hash & self.ST_SHIFT_MASK def make_a_counter_per_value(self, guard_value_op): - if rgc.stm_is_enabled(): - return # XXX don't use the special counters in stm mode for now assert guard_value_op.getopnum() == rop.GUARD_VALUE box = guard_value_op.getarg(0) try: diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -9,7 +9,6 @@ from 
rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.codewriter import heaptracker, longlong from rpython.rlib.objectmodel import compute_identity_hash -from rpython.rlib import rgc import weakref # ____________________________________________________________ @@ -298,24 +297,17 @@ class ConstPtr(Const): type = REF value = lltype.nullptr(llmemory.GCREF.TO) - imm_value = 0 - _attrs_ = ('value', 'imm_value',) + _attrs_ = ('value',) def __init__(self, value): assert lltype.typeOf(value) == llmemory.GCREF self.value = value - self.imm_value = 0 def clonebox(self): return BoxPtr(self.value) nonconstbox = clonebox - def get_imm_value(self): - # imm_value set if needed: - assert (not self.value) or self.imm_value - return self.imm_value - def getref_base(self): return self.value diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -18,7 +18,7 @@ from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory +from rpython.rtyper.lltypesystem import lltype, rclass, rffi # ____________________________________________________________ @@ -201,6 +201,7 @@ @arguments("int") def opimpl_stm_should_break_transaction(self, if_there_is_no_other): + from rpython.rtyper.lltypesystem import llmemory val = bool(if_there_is_no_other) mi = self.metainterp if val: @@ -2749,6 +2750,7 @@ # if the codewriter didn't produce any OS_LIBFFI_CALL at all. assert self.staticdata.has_libffi_call # + from rpython.rtyper.lltypesystem import llmemory from rpython.rlib.jit_libffi import CIF_DESCRIPTION_P from rpython.jit.backend.llsupport.ffisupport import get_arg_descr # From noreply at buildbot.pypy.org Thu Mar 20 18:27:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 18:27:04 +0100 (CET) Subject: [pypy-commit] pypy default: From stm: to increment debug counters, replace the three instructions Message-ID: <20140320172704.282F51C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70123:ab65db3705bd Date: 2014-03-20 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/ab65db3705bd/ Log: From stm: to increment debug counters, replace the three instructions with a single one. 
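In other words, the debugging-counter helper in llsupport/assembler.py now emits one resoperation where it used to emit three. A simplified sketch of the method as it reads after the patch below (the surrounding class and _register_counter are unchanged and not shown):

    from rpython.rtyper.lltypesystem import lltype, rffi
    from rpython.jit.metainterp.history import ConstInt
    from rpython.jit.metainterp.resoperation import ResOperation, rop

    def _append_debugging_code(self, operations, tp, number, token):
        counter = self._register_counter(tp, number, token)
        c_adr = ConstInt(rffi.cast(lltype.Signed, counter))
        # a single INCREMENT_DEBUG_COUNTER instead of the previous
        # GETFIELD_RAW / INT_ADD / SETFIELD_RAW sequence
        operations.append(
            ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None))

On x86 the new operation is compiled to a single read-modify-write INC on the counter's memory location, which is also why the backend test now expects 2 increment_debug_counter operations in ops_offset instead of the earlier 2*3 getfield/add/setfield ones.
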
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -15,7 +15,7 @@ DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', # 'b'ridge, 'l'abel or # 'e'ntry point - ('i', lltype.Signed), + ('i', lltype.Signed), # first field, at offset 0 ('type', lltype.Char), ('number', lltype.Signed) ) @@ -64,7 +64,6 @@ self.cpu = cpu self.memcpy_addr = 0 self.rtyper = cpu.rtyper - self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self._debug = False def setup_once(self): @@ -265,14 +264,8 @@ def _append_debugging_code(self, operations, tp, number, token): counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - operations.extend(ops) + operations.append( + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4338,3 +4338,12 @@ assert rffi.cast(lltype.Signed, a[0]) == -7654 assert rffi.cast(lltype.Signed, a[1]) == 777 lltype.free(a, flavor='raw') + + def test_increment_debug_counter(self): + foo = lltype.malloc(rffi.CArray(lltype.Signed), 1, flavor='raw') + foo[0] = 1789200 + self.execute_operation(rop.INCREMENT_DEBUG_COUNTER, + [ConstInt(rffi.cast(lltype.Signed, foo))], + 'void') + assert foo[0] == 1789201 + lltype.free(foo, flavor='raw') diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1469,6 +1469,14 @@ ofs_loc) self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + def genop_discard_increment_debug_counter(self, op, arglocs): + # The argument should be an immediate address. This should + # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a + # SETFIELD_RAW. Here we use the direct from-memory-to-memory + # increment operation of x86. 
+ base_loc, = arglocs + self.mc.INC(mem(base_loc, 0)) + def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs assert isinstance(size_loc, ImmedLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1003,6 +1003,10 @@ consider_getfield_raw_pure = consider_getfield_gc consider_getfield_gc_pure = consider_getfield_gc + def consider_increment_debug_counter(self, op): + base_loc = self.loc(op.getarg(0)) + self.perform_discard(op, [base_loc]) + def consider_getarrayitem_gc(self, op): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) args = op.getarglist() diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -488,12 +488,22 @@ for possible_code in unrolling_location_codes: if code == possible_code: val = getattr(loc, "value_" + possible_code)() - if self.WORD == 8 and possible_code == 'i' and not rx86.fits_in_32bits(val): - self._load_scratch(val) + # Faking out of certain operations for x86_64 + fits32 = rx86.fits_in_32bits + if possible_code == 'i' and not fits32(val): + self._load_scratch(val) # for 'PUSH(imm)' _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) - else: - methname = name + "_" + possible_code - _rx86_getattr(self, methname)(val) + return + if possible_code == 'j' and not fits32(val): + val = self._addr_as_reg_offset(val) + _rx86_getattr(self, name + "_m")(val) + return + if possible_code == 'm' and not fits32(val[1]): + val = self._fix_static_offset_64_m(val) + if possible_code == 'a' and not fits32(val[3]): + val = self._fix_static_offset_64_a(val) + methname = name + "_" + possible_code + _rx86_getattr(self, methname)(val) return func_with_new_name(INSN, "INSN_" + name) @@ -600,6 +610,7 @@ TEST8 = _binaryop('TEST8') BTS = _binaryop('BTS') + INC = _unaryop('INC') ADD = _binaryop('ADD') SUB = _binaryop('SUB') IMUL = _binaryop('IMUL') diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -470,6 +470,9 @@ # ------------------------------ Arithmetic ------------------------------ + INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) + INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1)) + ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0) OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_ = common_modes(1) AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_ = common_modes(4) diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py --- a/rpython/jit/backend/x86/test/test_regloc.py +++ b/rpython/jit/backend/x86/test/test_regloc.py @@ -373,3 +373,56 @@ '\x59' ) assert cb.getvalue() == expected_instructions + + # ------------------------------------------------------------ + + def test_push_immed64(self): + immed = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.PUSH(imm(immed)) + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # push r11 + '\x41\x53' + ) + assert cb.getvalue() == expected_instructions + + def test_inc_64bit_address_1(self): + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr)) + # this case is a INC_j + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + 
'\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # inc [r11] + '\x49\xFF\x03' + ) + assert cb.getvalue() == expected_instructions + + def test_inc_64bit_address_2(self): + py.test.skip("there is no unary instruction INSN_a so far") + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(ImmedLoc(0), edx, 3, base_addr)) + # this case would be a INC_a + xxx + + def test_inc_64bit_address_3(self): + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(eax, ImmedLoc(0), 0, base_addr)) + # this case is a INC_m + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # lea r11, [rax+r11] + '\x4E\x8D\x1C\x18' + # inc [r11] + '\x49\xFF\x03' + ) + assert cb.getvalue() == expected_instructions diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -427,8 +427,8 @@ debug._log = None # assert ops_offset is looptoken._x86_ops_offset - # 2*(getfield_raw/int_add/setfield_raw) + ops + None - assert len(ops_offset) == 2*3 + len(operations) + 1 + # 2*increment_debug_counter + ops + None + assert len(ops_offset) == 2 + len(operations) + 1 assert (ops_offset[operations[0]] <= ops_offset[operations[1]] <= ops_offset[operations[2]] <= diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -332,6 +332,7 @@ continue if value in (rop.FORCE_TOKEN, rop.CALL_ASSEMBLER, + rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, rop.DEBUG_MERGE_POINT, diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- + 'INCREMENT_DEBUG_COUNTER/1', 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', From noreply at buildbot.pypy.org Thu Mar 20 18:27:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 18:27:05 +0100 (CET) Subject: [pypy-commit] pypy default: The strict minimum for ARM is this, I think Message-ID: <20140320172705.6DEA51C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70124:aaf62bccd9c3 Date: 2014-03-20 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/aaf62bccd9c3/ Log: The strict minimum for ARM is this, I think diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -583,6 +583,10 @@ emit_op_getfield_raw_pure = emit_op_getfield_gc emit_op_getfield_gc_pure = emit_op_getfield_gc + def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): + # XXX implement me + return fcond + def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -849,6 +849,10 @@ prepare_op_getfield_raw_pure = prepare_op_getfield_gc prepare_op_getfield_gc_pure = prepare_op_getfield_gc + def 
prepare_op_increment_debug_counter(self, op, fcond): + # XXX implement me + return [] + def prepare_op_getinteriorfield_gc(self, op, fcond): t = unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = t From noreply at buildbot.pypy.org Thu Mar 20 18:46:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 20 Mar 2014 18:46:05 +0100 (CET) Subject: [pypy-commit] pypy default: backport 9813b8a36001 from py3k: cache UnsupportedOperation but this time to Message-ID: <20140320174605.B9E8F1C1413@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70125:f15f9a524509 Date: 2014-03-20 10:42 -0700 http://bitbucket.org/pypy/pypy/changeset/f15f9a524509/ Log: backport 9813b8a36001 from py3k: cache UnsupportedOperation but this time to avoid potential module reloading issues diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -8,6 +8,8 @@ interpleveldefs = { 'DEFAULT_BUFFER_SIZE': 'space.wrap(interp_iobase.DEFAULT_BUFFER_SIZE)', 'BlockingIOError': 'interp_io.W_BlockingIOError', + 'UnsupportedOperation': + 'space.fromcache(interp_io.Cache).w_unsupportedoperation', '_IOBase': 'interp_iobase.W_IOBase', '_RawIOBase': 'interp_iobase.W_RawIOBase', '_BufferedIOBase': 'interp_bufferedio.W_BufferedIOBase', @@ -26,16 +28,6 @@ 'IncrementalNewlineDecoder': 'interp_textio.W_IncrementalNewlineDecoder', } - def init(self, space): - MixedModule.init(self, space) - w_UnsupportedOperation = space.call_function( - space.w_type, - space.wrap('UnsupportedOperation'), - space.newtuple([space.w_ValueError, space.w_IOError]), - space.newdict()) - space.setattr(self, space.wrap('UnsupportedOperation'), - w_UnsupportedOperation) - def shutdown(self, space): # at shutdown, flush all open streams. Ignore I/O errors. 
from pypy.module._io.interp_iobase import get_autoflusher diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -10,6 +10,12 @@ from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES +class Cache: + def __init__(self, space): + self.w_unsupportedoperation = space.new_exception_class( + "io.UnsupportedOperation", + space.newtuple([space.w_ValueError, space.w_IOError])) + class W_BlockingIOError(W_IOError): def __init__(self, space): W_IOError.__init__(self, space) From noreply at buildbot.pypy.org Thu Mar 20 19:46:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 19:46:44 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for the test in llgraph/test/ Message-ID: <20140320184644.4224E1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70126:c6230cd996a0 Date: 2014-03-20 19:42 +0100 http://bitbucket.org/pypy/pypy/changeset/c6230cd996a0/ Log: Fix for the test in llgraph/test/ diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -553,6 +553,10 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_increment_debug_counter(self, addr): + p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) + p[0] += 1 + def unpack_arraydescr_size(self, arraydescr): from rpython.jit.backend.llsupport.symbolic import get_array_token from rpython.jit.backend.llsupport.descr import get_type_flag, FLAG_SIGNED From noreply at buildbot.pypy.org Thu Mar 20 20:11:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 20 Mar 2014 20:11:04 +0100 (CET) Subject: [pypy-commit] pypy default: fix builtin reimport/reload (test_zipimport_deflated failure) Message-ID: <20140320191104.3097D1C02AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70127:f29413fd44b7 Date: 2014-03-20 15:09 -0400 http://bitbucket.org/pypy/pypy/changeset/f29413fd44b7/ Log: fix builtin reimport/reload (test_zipimport_deflated failure) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -465,13 +465,8 @@ if isinstance(w_mod, Module): if not reuse and w_mod.startup_called: # Create a copy of the module - w_mod.getdict(self) # unlazy w_initialdict - w_new = self.wrap(Module(self, w_name)) - self.call_method(w_new.getdict(self), 'update', - w_mod.w_initialdict) - w_mod = w_new - else: - w_mod.init(self) + w_mod = self.wrap(w_mod.__class__(self, w_name)) + w_mod.init(self) # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) From noreply at buildbot.pypy.org Thu Mar 20 20:11:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 20:11:54 +0100 (CET) Subject: [pypy-commit] pypy default: Fix x86/test/test_z* Message-ID: <20140320191154.49E871C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70128:855c8b15eee6 Date: 2014-03-20 20:11 +0100 http://bitbucket.org/pypy/pypy/changeset/855c8b15eee6/ Log: Fix x86/test/test_z* diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -306,6 +306,7 @@ def _optimize_CALL_DICT_LOOKUP(self, op): descrs = op.getdescr().get_extra_info().extradescrs + assert descrs # translation hint descr1 = descrs[0] 
descr2 = descrs[1] if descr1 in self.cached_dict_reads: From noreply at buildbot.pypy.org Thu Mar 20 20:24:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 20:24:01 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Remove this Message-ID: <20140320192401.6498E1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70129:d272e010c27b Date: 2014-03-20 20:13 +0100 http://bitbucket.org/pypy/pypy/changeset/d272e010c27b/ Log: Remove this diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1032,9 +1032,6 @@ def execute_stm_transaction_break(self, _, really_wanted): pass - def execute_increment_debug_counter(self, descr, a): - pass - def execute_keepalive(self, descr, x): pass From noreply at buildbot.pypy.org Thu Mar 20 20:24:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 20:24:05 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: hg merge default Message-ID: <20140320192405.D02721C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70130:5d12511d4ee3 Date: 2014-03-20 20:22 +0100 http://bitbucket.org/pypy/pypy/changeset/5d12511d4ee3/ Log: hg merge default diff too long, truncating to 2000 out of 6372 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -171,7 +171,7 @@ # very inconsisten on CPython. In PyPy, memoryview supports # the buffer interface, and thus the following comparison # succeeds. See also the comment in - # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer # # Comparison with objects which don't support the buffer API self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -292,6 +292,10 @@ depending on the compiler settings, the default of 768KB is enough for about 1400 calls. +* since the implementation of dictionary is different, the exact number + which ``__hash__`` and ``__eq__`` are called is different. Since CPython + does not give any specific guarantees either, don't rely on it. + * assignment to ``__class__`` is limited to the cases where it works on CPython 2.5. On CPython 2.6 and 2.7 it works in a bit more cases, which are not supported by PyPy so far. (If needed, diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -105,3 +105,15 @@ .. branch: stdlib-2.7.6 Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. 
branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -194,6 +194,14 @@ def immutable_unique_id(self, space): return None + def buffer_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.buffer_w(space) + self._typed_unwrap_error(space, "buffer") + def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -432,17 +440,16 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') - try: - w_mod = self.getitem(w_modules, w_name) - except OperationError, e: - if not e.match(self, self.w_KeyError): - raise - else: - if not force_init: - return w_mod + if not force_init: + assert reuse is True + try: + return self.getitem(w_modules, w_name) + except OperationError, e: + if not e.match(self, self.w_KeyError): + raise # If the module is a builtin but not yet imported, # retrieve it and initialize it @@ -453,13 +460,16 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: + # Initialize the module + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + if not reuse and w_mod.startup_called: + # Create a copy of the module + w_mod = self.wrap(w_mod.__class__(self, w_name)) + w_mod.init(self) + # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) - - # And initialize it - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - w_mod.init(self) return w_mod def get_builtinmodule_to_install(self): @@ -1320,10 +1330,7 @@ 'to unsigned int')) def buffer_w(self, w_obj): - # returns a Buffer instance - from pypy.interpreter.buffer import Buffer - w_buffer = self.buffer(w_obj) - return self.interp_w(Buffer, w_buffer) + return w_obj.buffer_w(self) def rwbuffer_w(self, w_obj): # returns a RWBuffer instance @@ -1683,7 +1690,6 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,32 +1,12 @@ """ Buffer protocol support. """ +from rpython.rlib.objectmodel import import_from_mixin -# The implementation of the buffer protocol. The basic idea is that we -# can ask any app-level object for a 'buffer' view on it, by calling its -# __buffer__() special method. It should return a wrapped instance of a -# subclass of the Buffer class defined below. Note that __buffer__() is -# a PyPy-only extension to the Python language, made necessary by the -# fact that it's not natural in PyPy to hack an interp-level-only -# interface. -# In normal usage, the convenience method space.buffer_w() should be -# used to get directly a Buffer instance. Doing so also gives you for -# free the typecheck that __buffer__() really returned a wrapped Buffer. 
- -import operator -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash, import_from_mixin -from rpython.rlib.rstring import StringBuilder - - -class Buffer(W_Root): - """Abstract base class for memory views.""" - - __slots__ = () # no extra slot here +class Buffer(object): + """Abstract base class for buffers.""" + __slots__ = [] def getlength(self): raise NotImplementedError @@ -50,93 +30,10 @@ def is_writable(self): return False - # __________ app-level support __________ - - def descr_len(self, space): - return space.wrap(self.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - return space.wrap(self.getitem(start)) - res = self.getslice(start, stop, step, size) - return space.wrap(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self, RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - if len(newstring) != 1: - msg = 'buffer[index]=x: x must be a single character' - raise OperationError(space.w_TypeError, space.wrap(msg)) - char = newstring[0] # annotator hint - self.setitem(start, char) - elif step == 1: - if len(newstring) != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_ValueError, space.wrap(msg)) - self.setslice(start, newstring) - else: - raise OperationError(space.w_ValueError, - space.wrap("buffer object does not support" - " slicing with a step")) - - def descr__buffer__(self, space): - return space.wrap(self) - - def descr_str(self, space): - return space.wrap(self.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrap(self.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrap(self.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self, RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.getlength())) - class RWBuffer(Buffer): - """Abstract base class for read-write memory views.""" - - __slots__ = () # no extra slot here + """Abstract base class for read-write buffers.""" + __slots__ = [] def is_writable(self): return True @@ -151,76 
+48,8 @@ self.setitem(start + i, string[i]) - at unwrap_spec(offset=int, size=int) -def descr_buffer__new__(space, w_subtype, w_object, offset=0, size=-1): - # w_subtype can only be exactly 'buffer' for now - if not space.is_w(w_subtype, space.gettypefor(Buffer)): - raise OperationError(space.w_TypeError, - space.wrap("argument 1 must be 'buffer'")) - - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - w_buffer = space.wrap(StringBuffer(builder.build())) - else: - w_buffer = space.buffer(w_object) - - buffer = space.interp_w(Buffer, w_buffer) # type-check - if offset == 0 and size == -1: - return w_buffer - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buffer, RWBuffer): - buffer = RWSubBuffer(buffer, offset, size) - else: - buffer = SubBuffer(buffer, offset, size) - return space.wrap(buffer) - - -Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). -""", - __new__ = interp2app(descr_buffer__new__), - __len__ = interp2app(Buffer.descr_len), - __getitem__ = interp2app(Buffer.descr_getitem), - __setitem__ = interp2app(Buffer.descr_setitem), - __buffer__ = interp2app(Buffer.descr__buffer__), - __str__ = interp2app(Buffer.descr_str), - __add__ = interp2app(Buffer.descr_add), - __eq__ = interp2app(Buffer.descr_eq), - __ne__ = interp2app(Buffer.descr_ne), - __lt__ = interp2app(Buffer.descr_lt), - __le__ = interp2app(Buffer.descr_le), - __gt__ = interp2app(Buffer.descr_gt), - __ge__ = interp2app(Buffer.descr_ge), - __hash__ = interp2app(Buffer.descr_hash), - __mul__ = interp2app(Buffer.descr_mul), - __rmul__ = interp2app(Buffer.descr_mul), - __repr__ = interp2app(Buffer.descr_repr), -) -Buffer.typedef.acceptable_as_base_class = False - -# ____________________________________________________________ - class StringBuffer(Buffer): + __slots__ = ['value'] def __init__(self, value): self.value = value @@ -241,43 +70,12 @@ assert 0 <= start <= stop return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) - - -class StringLikeBuffer(Buffer): - """For app-level objects that already have a string-like interface - with __len__ and a __getitem__ that returns characters or (with - slicing) substrings.""" - # XXX this is inefficient, it should only be used temporarily - - def __init__(self, space, w_obj): - self.space = space - self.w_obj = w_obj - - def getlength(self): - space = self.space - return space.len_w(self.w_obj) - - def getitem(self, index): - space = self.space - s = space.str_w(space.getitem(self.w_obj, space.wrap(index))) - if len(s) != 1: - raise OperationError(space.w_ValueError, - space.wrap("character expected, got string")) - char = s[0] # annotator hint - return char - - def 
getslice(self, start, stop, step, size): - space = self.space - if step != 1: - raise OperationError(space.w_ValueError, space.wrap( - "buffer object does not support slicing with a step")) - s = space.str_w(space.getslice(self.w_obj, space.wrap(start), - space.wrap(stop))) - return s - # ____________________________________________________________ + class SubBufferMixin(object): + _attrs_ = ['buffer', 'offset', 'size'] + def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -299,11 +97,14 @@ if start == stop: return '' # otherwise, adding self.offset might make them # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) + return self.buffer.getslice(self.offset + start, self.offset + stop, + step, size) + class SubBuffer(Buffer): import_from_mixin(SubBufferMixin) + class RWSubBuffer(RWBuffer): import_from_mixin(SubBufferMixin) diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,29 +1,25 @@ import py -from pypy.interpreter.buffer import Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) class TestBuffer: - def test_buffer_w(self): space = self.space w_hello = space.wrap('hello world') buf = space.buffer_w(w_hello) - assert isinstance(buf, Buffer) assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.wrap(buf)) is buf + assert space.buffer_w(space.newbuffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.buffer(w_hello)) == 'hello world' + assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) def test_file_write(self): space = self.space - w_buffer = space.buffer(space.wrap('hello world')) + w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') @@ -44,4 +40,4 @@ space.bufferstr_w, space.wrap(u'\xe9')) -# Note: some app-level tests for buffer are in module/__builtin__/test/. +# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -33,16 +33,11 @@ interpleveldefs = { # constants + '__debug__' : '(space.w_True)', # XXX 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', - '__debug__' : '(space.w_True)', # XXX - 'type' : '(space.w_type)', - 'object' : '(space.w_object)', 'bytes' : '(space.w_str)', - 'unicode' : '(space.w_unicode)', - 'buffer' : 'interp_memoryview.W_Buffer', - 'memoryview' : 'interp_memoryview.W_MemoryView', 'file' : 'state.get(space).w_file', 'open' : 'state.get(space).w_file', diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py deleted file mode 100644 --- a/pypy/module/__builtin__/interp_memoryview.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -Implementation of the 'buffer' and 'memoryview' types. 
-""" -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import buffer -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError -import operator - -W_Buffer = buffer.Buffer # actually implemented in pypy.interpreter.buffer - - -class W_MemoryView(W_Root): - """Implement the built-in 'memoryview' type as a thin wrapper around - an interp-level buffer. - """ - - def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) - self.buf = buf - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if isinstance(w_other, W_MemoryView): - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - - try: - w_buf = space.buffer(w_other) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return space.w_NotImplemented - else: - str1 = self.as_str() - str2 = space.buffer_w(w_buf).as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def as_str(self): - return self.buf.as_str() - - def getlength(self): - return self.buf.getlength() - - def getslice(self, start, stop): - if start < 0: - start = 0 - size = stop - start - if size < 0: - size = 0 - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf = buffer.RWSubBuffer(buf, start, size) - else: - buf = buffer.SubBuffer(buf, start, size) - return W_MemoryView(buf) - - def descr_buffer(self, space): - """ - Note that memoryview() is very inconsistent in CPython: it does not - support the buffer interface but does support the new buffer - interface: as a result, it is possible to pass memoryview to - e.g. socket.send() but not to file.write(). For simplicity and - consistency, in PyPy memoryview DOES support buffer(), which means - that it is accepted in more places than CPython. 
- """ - return space.wrap(self.buf) - - def descr_tobytes(self, space): - return space.wrap(self.as_str()) - - def descr_tolist(self, space): - buf = self.buf - result = [] - for i in range(buf.getlength()): - result.append(space.wrap(ord(buf.getitem(i)))) - return space.newlist(result) - - def descr_getitem(self, space, w_index): - start, stop, step = space.decode_index(w_index, self.getlength()) - if step == 0: # index only - return space.wrap(self.buf.getitem(start)) - elif step == 1: - res = self.getslice(start, stop) - return space.wrap(res) - else: - raise OperationError(space.w_ValueError, - space.wrap("memoryview object does not support" - " slicing with a step")) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf.descr_setitem(space, w_index, newstring) - else: - raise OperationError(space.w_TypeError, - space.wrap("cannot modify read-only memory")) - - def descr_len(self, space): - return self.buf.descr_len(space) - - def w_get_format(self, space): - return space.wrap("B") - - def w_get_itemsize(self, space): - return space.wrap(1) - - def w_get_ndim(self, space): - return space.wrap(1) - - def w_is_readonly(self, space): - return space.wrap(not isinstance(self.buf, buffer.RWBuffer)) - - def w_get_shape(self, space): - return space.newtuple([space.wrap(self.getlength())]) - - def w_get_strides(self, space): - return space.newtuple([space.wrap(1)]) - - def w_get_suboffsets(self, space): - # I've never seen anyone filling this field - return space.w_None - - -def descr_new(space, w_subtype, w_object): - memoryview = W_MemoryView(space.buffer(w_object)) - return space.wrap(memoryview) - -W_MemoryView.typedef = TypeDef( - "memoryview", - __doc__ = """\ -Create a new memoryview object which references the given object. 
-""", - __new__ = interp2app(descr_new), - __buffer__ = interp2app(W_MemoryView.descr_buffer), - __eq__ = interp2app(W_MemoryView.descr_eq), - __ge__ = interp2app(W_MemoryView.descr_ge), - __getitem__ = interp2app(W_MemoryView.descr_getitem), - __gt__ = interp2app(W_MemoryView.descr_gt), - __le__ = interp2app(W_MemoryView.descr_le), - __len__ = interp2app(W_MemoryView.descr_len), - __lt__ = interp2app(W_MemoryView.descr_lt), - __ne__ = interp2app(W_MemoryView.descr_ne), - __setitem__ = interp2app(W_MemoryView.descr_setitem), - tobytes = interp2app(W_MemoryView.descr_tobytes), - tolist = interp2app(W_MemoryView.descr_tolist), - format = GetSetProperty(W_MemoryView.w_get_format), - itemsize = GetSetProperty(W_MemoryView.w_get_itemsize), - ndim = GetSetProperty(W_MemoryView.w_get_ndim), - readonly = GetSetProperty(W_MemoryView.w_is_readonly), - shape = GetSetProperty(W_MemoryView.w_get_shape), - strides = GetSetProperty(W_MemoryView.w_get_strides), - suboffsets = GetSetProperty(W_MemoryView.w_get_suboffsets), - ) -W_MemoryView.typedef.acceptable_as_base_class = False diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py deleted file mode 100644 --- a/pypy/module/__builtin__/test/test_buffer.py +++ /dev/null @@ -1,229 +0,0 @@ -"""Tests some behaviour of the buffer type that is not tested in -lib-python/2.5.2/test/test_types.py where the stdlib buffer tests live.""" - -class AppTestBuffer: - spaceconfig = dict(usemodules=['array']) - - def test_unicode_buffer(self): - import sys - b = buffer(u"ab") - if sys.maxunicode == 65535: # UCS2 build - assert len(b) == 4 - if sys.byteorder == "big": - assert b[0:4] == "\x00a\x00b" - else: - assert b[0:4] == "a\x00b\x00" - else: # UCS4 build - assert len(b) == 8 - if sys.byteorder == "big": - assert b[0:8] == "\x00\x00\x00a\x00\x00\x00b" - else: - assert b[0:8] == "a\x00\x00\x00b\x00\x00\x00" - - def test_array_buffer(self): - import array - b = buffer(array.array("B", [1, 2, 3])) - assert len(b) == 3 - assert b[0:3] == "\x01\x02\x03" - - def test_nonzero(self): - assert buffer('\x00') - assert not buffer('') - import array - assert buffer(array.array("B", [0])) - assert not buffer(array.array("B", [])) - - def test_str(self): - assert str(buffer('hello')) == 'hello' - - def test_repr(self): - # from 2.5.2 lib tests - assert repr(buffer('hello')).startswith(' buffer('ab')) - assert buffer('ab') >= buffer('ab') - assert buffer('ab') != buffer('abc') - assert buffer('ab') < buffer('abc') - assert buffer('ab') <= buffer('ab') - assert buffer('ab') > buffer('aa') - assert buffer('ab') >= buffer('ab') - - def test_hash(self): - assert hash(buffer('hello')) == hash('hello') - - def test_mul(self): - assert buffer('ab') * 5 == 'ababababab' - assert buffer('ab') * (-2) == '' - assert 5 * buffer('ab') == 'ababababab' - assert (-2) * buffer('ab') == '' - - def test_offset_size(self): - b = buffer('hello world', 6) - assert len(b) == 5 - assert b[0] == 'w' - assert b[:] == 'world' - raises(IndexError, 'b[5]') - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == 'r' - assert b[:] == 'rld' - raises(IndexError, 'b[3]') - b = buffer('hello world', 1, 8) - assert len(b) == 8 - assert b[0] == 'e' - assert b[:] == 'ello wor' - raises(IndexError, 'b[8]') - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == ' ' - assert b[:] == 'lo ' - raises(IndexError, 'b[3]') - b = buffer('hello world', 55) - assert len(b) == 0 - assert b[:] == '' - b = buffer('hello world', 6, 999) - assert len(b) == 5 - assert b[:] == 'world' - 
- raises(ValueError, buffer, "abc", -1) - raises(ValueError, buffer, "abc", 0, -2) - - def test_rw_offset_size(self): - import array - - a = array.array("c", 'hello world') - b = buffer(a, 6) - assert len(b) == 5 - assert b[0] == 'w' - assert b[:] == 'world' - raises(IndexError, 'b[5]') - b[0] = 'W' - assert str(b) == 'World' - assert a.tostring() == 'hello World' - b[:] = '12345' - assert a.tostring() == 'hello 12345' - raises(IndexError, 'b[5] = "."') - b[4:2] = '' - assert a.tostring() == 'hello 12345' - - b = buffer(b, 2) - assert len(b) == 3 - assert b[0] == '3' - assert b[:] == '345' - raises(IndexError, 'b[3]') - b[1] = 'X' - assert a.tostring() == 'hello 123X5' - raises(IndexError, 'b[3] = "."') - - a = array.array("c", 'hello world') - b = buffer(a, 1, 8) - assert len(b) == 8 - assert b[0] == 'e' - assert b[:] == 'ello wor' - raises(IndexError, 'b[8]') - b[0] = 'E' - assert str(b) == 'Ello wor' - assert a.tostring() == 'hEllo world' - b[:] = '12345678' - assert a.tostring() == 'h12345678ld' - raises(IndexError, 'b[8] = "."') - - b = buffer(b, 2, 3) - assert len(b) == 3 - assert b[2] == '5' - assert b[:] == '345' - raises(IndexError, 'b[3]') - b[1] = 'X' - assert a.tostring() == 'h123X5678ld' - raises(IndexError, 'b[3] = "."') - - b = buffer(a, 55) - assert len(b) == 0 - assert b[:] == '' - b = buffer(a, 6, 999) - assert len(b) == 5 - assert b[:] == '678ld' - - raises(ValueError, buffer, a, -1) - raises(ValueError, buffer, a, 0, -2) - - def test_slice(self): - # Test extended slicing by comparing with list slicing. - s = "".join(chr(c) for c in list(range(255, -1, -1))) - b = buffer(s) - indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300) - for start in indices: - for stop in indices: - # Skip step 0 (invalid) - for step in indices[1:]: - assert b[start:stop:step] == s[start:stop:step] - - def test_getitem_only_ints(self): - class MyInt(object): - def __init__(self, x): - self.x = x - - def __int__(self): - return self.x - - buf = buffer('hello world') - raises(TypeError, "buf[MyInt(0)]") - raises(TypeError, "buf[MyInt(0):MyInt(5)]") - - -class AppTestMemoryView: - def test_basic(self): - v = memoryview("abc") - assert v.tobytes() == "abc" - assert len(v) == 3 - assert list(v) == ['a', 'b', 'c'] - assert v.tolist() == [97, 98, 99] - assert v[1] == "b" - assert v[-1] == "c" - raises(TypeError, "v[1] = 'x'") - assert v.readonly is True - w = v[1:234] - assert isinstance(w, memoryview) - assert len(w) == 2 - - def test_rw(self): - data = bytearray('abcefg') - v = memoryview(data) - assert v.readonly is False - v[0] = 'z' - assert data == bytearray(eval("b'zbcefg'")) - v[1:4] = '123' - assert data == bytearray(eval("b'z123fg'")) - raises((ValueError, TypeError), "v[2] = 'spam'") - - def test_memoryview_attrs(self): - v = memoryview("a"*100) - assert v.format == "B" - assert v.itemsize == 1 - assert v.shape == (100,) - assert v.ndim == 1 - assert v.strides == (1,) - - def test_suboffsets(self): - v = memoryview("a"*100) - assert v.suboffsets == None - v = memoryview(buffer("a"*100, 2)) - assert v.shape == (98,) - assert v.suboffsets == None - - def test_compare(self): - assert memoryview("abc") == "abc" - assert memoryview("abc") == bytearray("abc") - assert memoryview("abc") != 3 diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -24,6 +24,17 @@ else: cls.w_safe_runtimerror = cls.space.wrap(sys.version_info < (2, 6)) + def 
test_builtin_names(self): + import __builtin__ + assert __builtin__.None is None + assert __builtin__.False is False + assert __builtin__.True is True + + assert __builtin__.buffer is buffer + assert __builtin__.bytes is str + assert __builtin__.dict is dict + assert __builtin__.memoryview is memoryview + def test_bytes_alias(self): assert bytes is str assert isinstance(eval("b'hi'"), str) diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -7,7 +7,6 @@ class ByteBuffer(RWBuffer): - def __init__(self, len): self.data = ['\x00'] * len @@ -23,4 +22,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return space.wrap(ByteBuffer(length)) + return space.newbuffer(ByteBuffer(length)) diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.rstring import UnicodeBuilder, StringBuilder from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib import jit def create_builder(name, strtype, builder_cls): diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,9 +1,9 @@ -from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from pypy.objspace.std.memoryview import W_Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi @@ -39,38 +39,19 @@ copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) -class MiniBuffer(W_Root): - # a different subclass of W_Root for the MiniBuffer, because we - # want a slightly different (simplified) API at the level of Python. 
+# Override the typedef to narrow down the interface that's exposed to app-level +class MiniBuffer(W_Buffer): def __init__(self, buffer, keepalive=None): - self.buffer = buffer + W_Buffer.__init__(self, buffer) self.keepalive = keepalive - def descr_len(self, space): - return self.buffer.descr_len(space) - - def descr_getitem(self, space, w_index): - return self.buffer.descr_getitem(space, w_index) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - self.buffer.descr_setitem(space, w_index, newstring) - - def descr__buffer__(self, space): - return self.buffer.descr__buffer__(space) - - def descr_str(self, space): - return space.wrap(self.buffer.as_str()) - - MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), - __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), __str__ = interp2app(MiniBuffer.descr_str), ) diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -80,7 +80,6 @@ return (cfield.ctype, cfield.offset) def _copy_from_same(self, cdata, w_ob): - space = self.space if isinstance(w_ob, cdataobj.W_CData): if w_ob.ctype is self and self.size >= 0: misc._raw_memcopy(w_ob._cdata, cdata, self.size) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,4 +1,3 @@ -import weakref from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here, specialize -from rpython.rlib.rarithmetic import r_uint, r_ulonglong, is_signed_integer_type +from rpython.rlib.rarithmetic import r_uint, r_ulonglong from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -1,7 +1,6 @@ - # Package initialisation from pypy.interpreter.mixedmodule import MixedModule -import sys + class Module(MixedModule): appleveldefs = { diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -200,6 +200,10 @@ assert f.closed == True def test_repr(self): + import sys + if '__pypy__' not in sys.builtin_module_names and \ + sys.version_info < (2, 7, 4): + skip("see cpython issue14161") assert repr(self.file).startswith( " 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) a = addr.lock(_c.sockaddr_in6) 
rffi.setintfield(a, 'c_sin6_port', rsocket.htons(port)) rffi.setintfield(a, 'c_sin6_flowinfo', rsocket.htonl(flowinfo)) @@ -97,10 +94,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: return rsocket.UNIXAddress(space.str_w(w_address)) @@ -112,10 +106,16 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): if port < 0 or port > 0xffff: - raise OperationError(space.w_ValueError, space.wrap( + raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) return rffi.cast(rffi.USHORT, port) +def make_unsigned_flowinfo(space, flowinfo): + if flowinfo < 0 or flowinfo > 0xfffff: + raise OperationError(space.w_OverflowError, space.wrap( + "flowinfo must be 0-1048575.")) + return rffi.cast(lltype.Unsigned, flowinfo) + # XXX Hack to seperate rpython and pypy def ipaddr_from_object(space, w_sockaddr): host = space.str_w(space.getitem(w_sockaddr, space.wrap(0))) @@ -536,13 +536,9 @@ @unwrap_spec(family=int, type=int, proto=int) def newsocket(space, w_subtype, family=AF_INET, type=SOCK_STREAM, proto=0): - # XXX If we want to support subclassing the socket type we will need - # something along these lines. But allocate_instance is only defined - # on the standard object space, so this is not really correct. - #sock = space.allocate_instance(W_RSocket, w_subtype) - #Socket.__init__(sock, space, fd, family, type, proto) + sock = space.allocate_instance(W_RSocket, w_subtype) try: - sock = W_RSocket(family, type, proto) + W_RSocket.__init__(sock, family, type, proto) except SocketError, e: raise converted_error(space, e) return space.wrap(sock) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -399,7 +399,7 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() - + def test_socket_connect_ex(self): import _socket s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) @@ -425,8 +425,13 @@ def test_bigport(self): import _socket s = _socket.socket() - raises(ValueError, s.connect, ("localhost", 1000000)) - raises(ValueError, s.connect, ("localhost", -1)) + exc = raises(OverflowError, s.connect, ("localhost", -1)) + assert "port must be 0-65535." in str(exc.value) + exc = raises(OverflowError, s.connect, ("localhost", 1000000)) + assert "port must be 0-65535." in str(exc.value) + s = _socket.socket(_socket.AF_INET6) + exc = raises(OverflowError, s.connect, ("::1", 1234, 1048576)) + assert "flowinfo must be 0-1048575." 
in str(exc.value) def test_NtoH(self): import sys @@ -474,6 +479,13 @@ import socket s = socket.socket() + def test_subclass(self): + from _socket import socket + class MySock(socket): + blah = 123 + s = MySock() + assert s.blah == 123 + def test_getsetsockopt(self): import _socket as socket import struct @@ -575,11 +587,11 @@ class AppTestSocketTCP: + HOST = 'localhost' + def setup_class(cls): cls.space = space - HOST = 'localhost' - def setup_method(self, method): w_HOST = space.wrap(self.HOST) self.w_serv = space.appexec([w_socket, w_HOST], @@ -589,6 +601,7 @@ serv.listen(1) return serv ''') + def teardown_method(self, method): if hasattr(self, 'w_serv'): space.appexec([self.w_serv], '(serv): serv.close()') @@ -609,7 +622,7 @@ raises(error, raise_error) def test_recv_send_timeout(self): - from _socket import socket, timeout + from _socket import socket, timeout, SOL_SOCKET, SO_RCVBUF, SO_SNDBUF cli = socket() cli.connect(self.serv.getsockname()) t, addr = self.serv.accept() @@ -629,6 +642,9 @@ assert count is None buf = t.recv(1) assert buf == '?' + # speed up filling the buffers + t.setsockopt(SOL_SOCKET, SO_RCVBUF, 4096) + cli.setsockopt(SOL_SOCKET, SO_SNDBUF, 4096) # test send() timeout count = 0 try: @@ -656,7 +672,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes = cli.recv_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -671,7 +687,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes, addr = cli.recvfrom_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -682,6 +698,7 @@ cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) assert cli.family == socket.AF_INET + class AppTestErrno: def setup_class(cls): cls.space = space diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,5 +1,5 @@ class AppTestSSL: - spaceconfig = dict(usemodules=('_ssl', '_socket')) + spaceconfig = dict(usemodules=('_ssl', '_socket', 'thread')) def setup_class(cls): import os diff --git a/pypy/module/_ssl/thread_lock.py b/pypy/module/_ssl/thread_lock.py --- a/pypy/module/_ssl/thread_lock.py +++ b/pypy/module/_ssl/thread_lock.py @@ -1,4 +1,5 @@ -from rpython.rlib.ropenssl import * +from rpython.rlib import rthread +from rpython.rlib.ropenssl import libraries from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -22,7 +23,6 @@ # without caring about the GIL. 
separate_module_source = """ - #include static unsigned int _ssl_locks_count = 0; @@ -62,13 +62,12 @@ } """ -from rpython.rlib import rthread - eci = rthread.eci.merge(ExternalCompilationInfo( separate_module_sources=[separate_module_source], post_include_bits=[ "int _PyPy_SSL_SetupThreads(void);"], export_symbols=['_PyPy_SSL_SetupThreads'], + libraries = libraries, )) _PyPy_SSL_SetupThreads = rffi.llexternal('_PyPy_SSL_SetupThreads', diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -132,6 +132,9 @@ self.len = 0 self.allocated = 0 + def buffer_w(self, space): + return ArrayBuffer(self) + def descr_append(self, space, w_x): """ append(x) @@ -462,9 +465,6 @@ # Misc methods - def descr_buffer(self, space): - return space.wrap(ArrayBuffer(self)) - def descr_repr(self, space): if self.len == 0: return space.wrap("array('%s')" % self.typecode) @@ -508,7 +508,6 @@ __radd__ = interp2app(W_ArrayBase.descr_radd), __rmul__ = interp2app(W_ArrayBase.descr_rmul), - __buffer__ = interp2app(W_ArrayBase.descr_buffer), __repr__ = interp2app(W_ArrayBase.descr_repr), itemsize = GetSetProperty(descr_itemsize), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1,25 +1,8 @@ import sys -import py -import py.test - - -## class AppTestSimpleArray: -## spaceconfig = dict(usemodules=('array',)) -## def setup_class(cls): -## cls.w_simple_array = cls.space.appexec([], """(): -## import array -## return array.simple_array -## """) - -## def test_simple(self): -## a = self.simple_array(10) -## a[5] = 7.42 -## assert a[5] == 7.42 +import pytest class BaseArrayTests: - - def test_ctor(self): assert len(self.array('c')) == 0 assert len(self.array('i')) == 0 @@ -563,7 +546,6 @@ assert not a > 2*a assert not a >= 2*a - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -794,7 +776,6 @@ assert img[3, 25] == 3 * 9 - def test_override_from(self): class mya(self.array): def fromlist(self, lst): @@ -879,41 +860,41 @@ def test_assign_object_with_special_methods(self): from array import array - + class Num(object): def __float__(self): return 5.25 - + def __int__(self): return 7 - + class NotNum(object): pass - + class Silly(object): def __float__(self): return None - + def __int__(self): - return None + return None class OldNum: def __float__(self): return 6.25 - + def __int__(self): return 8 - + class OldNotNum: pass - + class OldSilly: def __float__(self): return None - + def __int__(self): return None - + for tc in 'bBhHiIlL': a = array(tc, [0]) raises(TypeError, a.__setitem__, 0, 1.0) @@ -931,7 +912,7 @@ a = array(tc, [0]) a[0] = 1.0 a[0] = 1 - a[0] = Num() + a[0] = Num() assert a[0] == 5.25 raises(TypeError, a.__setitem__, NotNum()) a[0] = OldNum() @@ -939,24 +920,23 @@ raises(TypeError, a.__setitem__, OldNotNum()) raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) - + a = array('c', 'hi') a[0] = 'b' assert a[0] == 'b' - + a = array('u', u'hi') a[0] = u'b' assert a[0] == u'b' - + class TestCPythonsOwnArray(BaseArrayTests): - def setup_class(cls): import array cls.array = array.array import struct cls.struct = struct - cls.tempfile = str(py.test.ensuretemp('array').join('tmpfile')) + cls.tempfile = str(pytest.ensuretemp('array').join('tmpfile')) cls.maxint = sys.maxint @@ -969,7 +949,7 @@ return array.array """) cls.w_tempfile = 
cls.space.wrap( - str(py.test.ensuretemp('array').join('tmpfile'))) + str(pytest.ensuretemp('array').join('tmpfile'))) cls.w_maxint = cls.space.wrap(sys.maxint) def test_buffer_info(self): @@ -1036,11 +1016,11 @@ def test_getitem_only_ints(self): class MyInt(object): - def __init__(self, x): - self.x = x + def __init__(self, x): + self.x = x - def __int__(self): - return self.x + def __int__(self): + return self.x a = self.array('i', [1, 2, 3, 4, 5, 6]) raises(TypeError, "a[MyInt(0)]") @@ -1050,4 +1030,3 @@ class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() spaceconfig['objspace.std.builtinshortcut'] = True - diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -22,10 +22,10 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.module import Module from pypy.interpreter.function import StaticMethod +from pypy.objspace.std.memoryview import W_MemoryView from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject -from pypy.module.__builtin__.interp_memoryview import W_MemoryView from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -1,11 +1,12 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.interpreter.buffer import StringBuffer, SubBuffer +from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref -from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer -from pypy.interpreter.error import OperationError from pypy.module.array.interp_array import ArrayBuffer +from pypy.objspace.std.memoryview import W_Buffer PyBufferObjectStruct = lltype.ForwardReference() @@ -24,7 +25,7 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.gettypefor(Buffer).instancetypedef, + make_typedescr(space.w_buffer.instancetypedef, basestruct=PyBufferObject.TO, attach=buffer_attach, dealloc=buffer_dealloc, @@ -39,23 +40,26 @@ rffi.setintfield(py_buf, 'c_b_readonly', 1) rffi.setintfield(py_buf, 'c_b_hash', -1) - if isinstance(w_obj, SubBuffer): - py_buf.c_b_offset = w_obj.offset - w_obj = w_obj.buffer + assert isinstance(w_obj, W_Buffer) + buf = w_obj.buf - # If w_obj already allocated a fixed buffer, use it, and keep a - # reference to w_obj. + if isinstance(buf, SubBuffer): + py_buf.c_b_offset = buf.offset + buf = buf.buffer + + # If buf already allocated a fixed buffer, use it, and keep a + # reference to buf. # Otherwise, b_base stays NULL, and we own the b_ptr. 
- if isinstance(w_obj, StringBuffer): + if isinstance(buf, StringBuffer): py_buf.c_b_base = lltype.nullptr(PyObject.TO) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value)) - py_buf.c_b_size = w_obj.getlength() - elif isinstance(w_obj, ArrayBuffer): - w_base = w_obj.array + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(buf.value)) + py_buf.c_b_size = buf.getlength() + elif isinstance(buf, ArrayBuffer): + w_base = buf.array py_buf.c_b_base = make_ref(space, w_base) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.array._charbuf_start()) - py_buf.c_b_size = w_obj.getlength() + py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, buf.array._charbuf_start()) + py_buf.c_b_size = buf.getlength() else: raise OperationError(space.w_NotImplementedError, space.wrap( "buffer flavor not supported")) diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -20,7 +20,7 @@ #define PyArrayObject PyObject #define PyArray_Descr PyObject -extern PyTypeObject PyArray_Type; +PyAPI_DATA(PyTypeObject) PyArray_Type; typedef unsigned char npy_bool; typedef unsigned char npy_uint8; diff --git a/pypy/module/cpyext/include/pystate.h b/pypy/module/cpyext/include/pystate.h --- a/pypy/module/cpyext/include/pystate.h +++ b/pypy/module/cpyext/include/pystate.h @@ -21,9 +21,8 @@ #define Py_END_ALLOW_THREADS PyEval_RestoreThread(_save); \ } -typedef - enum {PyGILState_LOCKED, PyGILState_UNLOCKED} - PyGILState_STATE; +enum {PyGILState_LOCKED, PyGILState_UNLOCKED}; +typedef int PyGILState_STATE; #define PyThreadState_GET() PyThreadState_Get() diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -204,16 +204,14 @@ # Before external call is after running Python rffi.aroundstate.before() -PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', - typedef='PyGILState_STATE', - compilation_info=CConfig._compilation_info_) +PyGILState_STATE = rffi.INT @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) def PyGILState_Ensure(space): if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return lltype.nullptr(PyGILState_STATE.TO) + return rffi.cast(PyGILState_STATE, 0) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -15,7 +15,7 @@ from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import Buffer as W_Buffer +from pypy.interpreter.buffer import Buffer from pypy.interpreter.argument import Arguments from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize @@ -228,7 +228,7 @@ check_num_args(space, w_args, 0) return space.wrap(generic_cpy_call(space, func_target, w_self)) -class CPyBuffer(W_Buffer): +class CPyBuffer(Buffer): # Similar to Py_buffer def __init__(self, ptr, size, w_obj): @@ -249,7 +249,7 @@ size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.wrap(CPyBuffer(ptr[0], size, w_self)) + return space.newbuffer(CPyBuffer(ptr[0], size, w_self)) def 
get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,7 +1,5 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -import py -import sys class AppTestArrayModule(AppTestCpythonExtensionBase): enable_leak_checking = False @@ -21,7 +19,7 @@ module = self.import_module(name='array') arr = module.array('i', [1,2,3]) sum = 0 - for i in arr: + for i in arr: sum += i assert sum == 6 @@ -60,4 +58,3 @@ '\x02\0\0\0' '\x03\0\0\0' '\x04\0\0\0') - diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,8 +64,10 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] - # prevent linking with python27.lib - kwds["compile_extra"].append("/DPy_BUILD_CORE") + # prevent linking with PythonXX.lib + w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] + kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % + (space.int_w(w_maj), space.int_w(w_min))] elif sys.platform == 'darwin': kwds["link_files"] = [str(api_library + '.dylib')] else: @@ -181,6 +183,19 @@ from rpython.rlib.clibffi import get_libc_name cls.w_libc = cls.space.wrap(get_libc_name()) + def setup_method(self, meth): + freeze_refcnts(self) + + def teardown_method(self, meth): + self.cleanup_references(self.space) + # XXX: like AppTestCpythonExtensionBase.teardown_method: + # find out how to disable check_and_print_leaks() if the + # test failed + assert not self.check_and_print_leaks(), ( + "Test leaks or loses object(s). You should also check if " + "the test actually passed in the first place; if it failed " + "it is likely to reach this place.") + def test_load_error(self): import cpyext raises(ImportError, cpyext.load_module, "missing.file", "foo") @@ -356,13 +371,12 @@ for name in self.imported_module_names: self.unimport_module(name) self.cleanup_references(self.space) - if self.check_and_print_leaks(): - assert False, ( - "Test leaks or loses object(s). You should also check if " - "the test actually passed in the first place; if it failed " - "it is likely to reach this place.") - # XXX find out how to disable check_and_print_leaks() if the - # XXX test failed... + # XXX: find out how to disable check_and_print_leaks() if the + # test failed... + assert not self.check_and_print_leaks(), ( + "Test leaks or loses object(s). 
You should also check if " + "the test actually passed in the first place; if it failed " + "it is likely to reach this place.") class AppTestCpythonExtension(AppTestCpythonExtensionBase): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -1,40 +1,36 @@ import os -import sys +from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rstring import rsplit +from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.annlowlevel import llhelper + from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.objspace.std.typeobject import W_TypeObject, find_best_base +from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import GetSetProperty +from pypy.module.__builtin__.abstractinst import abstract_issubclass_w +from pypy.module.cpyext import structmemberdefs from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - Py_TPFLAGS_HAVE_GETCHARBUFFER, - build_type_checkers, PyObjectFields) + Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers) +from pypy.module.cpyext.methodobject import ( + PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) +from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, - track_reference, RefcountState, borrow_from) -from pypy.interpreter.module import Module -from pypy.module.cpyext import structmemberdefs -from pypy.module.cpyext.modsupport import convert_method_defs + track_reference, RefcountState, borrow_from, Py_DecRef) +from pypy.module.cpyext.slotdefs import ( + slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State -from pypy.module.cpyext.methodobject import ( - PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) -from pypy.module.cpyext.pyobject import Py_IncRef, Py_DecRef, _Py_Dealloc from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( PyTypeObjectPtr, PyTypeObject, PyGetSetDef, PyMemberDef, newfunc, PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) -from pypy.module.cpyext.slotdefs import ( - slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) -from pypy.interpreter.buffer import Buffer -from pypy.interpreter.error import OperationError -from rpython.rlib.rstring import rsplit -from rpython.rlib.objectmodel import specialize -from pypy.module.__builtin__.abstractinst import abstract_issubclass_w -from pypy.module.__builtin__.interp_classobj import W_ClassObject -from rpython.rlib import jit +from pypy.objspace.std.typeobject import W_TypeObject, find_best_base From noreply at buildbot.pypy.org Thu Mar 20 20:26:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 20 Mar 2014 20:26:16 +0100 (CET) Subject: [pypy-commit] pypy default: backout 6002c93c0cc0 until properly fixed Message-ID: <20140320192616.09F8F1C02AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70131:c402f888f629 Date: 2014-03-20 15:25 -0400 http://bitbucket.org/pypy/pypy/changeset/c402f888f629/ Log: backout 6002c93c0cc0 until properly fixed 
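For context on the feature being backed out: the reverted change made a re-import of an already-started built-in module hand back a fresh module object instead of re-initializing the live one, so attributes added by user code after startup do not survive the re-import (the py3k variant merged later in this digest seeds the copy from the module's initial dict). A toy sketch of that intended behaviour, using simplified stand-in classes rather than the real ObjSpace/Module API:

class ToyModule(object):
    def __init__(self, name):
        self.name = name
        self.startup_called = False
        self.contents = {}
        self.initial_contents = {}

    def init(self):
        if not self.startup_called:
            self.contents['clock'] = object()          # module-defined state
            self.initial_contents = dict(self.contents)
            self.startup_called = True

def get_builtin_module(registry, name, reuse=True):
    mod = registry[name]
    if not reuse and mod.startup_called:
        # hand back a copy built from the *initial* dict, so attributes
        # added after startup (e.g. time.foo = "bar") are dropped
        fresh = ToyModule(name)
        fresh.contents.update(mod.initial_contents)
        fresh.startup_called = True
        mod = fresh
    else:
        mod.init()
    registry[name] = mod
    return mod

registry = {'time': ToyModule('time')}
first = get_builtin_module(registry, 'time')
first.contents['foo'] = 'bar'                          # user-added attribute
second = get_builtin_module(registry, 'time', reuse=False)
assert 'clock' in second.contents and 'foo' not in second.contents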
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -440,11 +440,10 @@ return name - def getbuiltinmodule(self, name, force_init=False, reuse=True): + def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: - assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -463,9 +462,6 @@ # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): - if not reuse and w_mod.startup_called: - # Create a copy of the module - w_mod = self.wrap(w_mod.__class__(self, w_name)) w_mod.init(self) # Add the module to sys.modules diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,8 +579,7 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True, - reuse=reuse) + return space.getbuiltinmodule(find_info.filename, force_init=True) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,6 +203,7 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 + skip("fix me") import sys, marshal old = marshal.loads @@ -222,6 +223,7 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,6 +578,7 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): + skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -585,6 +586,7 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): + skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org Thu Mar 20 20:27:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 20 Mar 2014 20:27:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: hg merge default Message-ID: <20140320192721.7B21A1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70132:7cdeff21eeec Date: 2014-03-20 20:26 +0100 http://bitbucket.org/pypy/pypy/changeset/7cdeff21eeec/ Log: hg merge default diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -440,11 +440,10 @@ return name - def getbuiltinmodule(self, name, force_init=False, reuse=True): + def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: - assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -463,9 +462,6 @@ # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): - if not reuse and w_mod.startup_called: - # Create a copy of the module - w_mod = 
self.wrap(w_mod.__class__(self, w_name)) w_mod.init(self) # Add the module to sys.modules diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,8 +579,7 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True, - reuse=reuse) + return space.getbuiltinmodule(find_info.filename, force_init=True) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,6 +203,7 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 + skip("fix me") import sys, marshal old = marshal.loads @@ -222,6 +223,7 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,6 +578,7 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): + skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -585,6 +586,7 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): + skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org Thu Mar 20 20:59:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 20 Mar 2014 20:59:12 +0100 (CET) Subject: [pypy-commit] pypy default: simplify Message-ID: <20140320195912.D15D61C1413@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70133:2d49948e8eba Date: 2014-03-20 15:57 -0400 http://bitbucket.org/pypy/pypy/changeset/2d49948e8eba/ Log: simplify diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -308,12 +308,11 @@ descrs = op.getdescr().get_extra_info().extradescrs assert descrs # translation hint descr1 = descrs[0] - descr2 = descrs[1] - if descr1 in self.cached_dict_reads: + try: d = self.cached_dict_reads[descr1] - else: + except KeyError: d = self.cached_dict_reads[descr1] = args_dict() - self.corresponding_array_descrs[descr2] = descr1 + self.corresponding_array_descrs[descrs[1]] = descr1 args = self.optimizer.make_args_key(op) try: res_v = d[args] From noreply at buildbot.pypy.org Fri Mar 21 00:02:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Mar 2014 00:02:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140320230231.5C1E51D2866@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70134:6ff661c8a2b0 Date: 2014-03-19 16:56 -0700 http://bitbucket.org/pypy/pypy/changeset/6ff661c8a2b0/ Log: merge default diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -441,10 +441,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, 
reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -460,10 +461,18 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # And initialize it + # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): - w_mod.init(self) + if not reuse and w_mod.startup_called: + # Create a copy of the module + w_mod.getdict(self) # unlazy w_initialdict + w_new = self.wrap(Module(self, w_name)) + self.call_method(w_new.getdict(self), 'update', + w_mod.w_initialdict) + w_mod = w_new + else: + w_mod.init(self) # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) diff --git a/pypy/module/cpyext/include/pystate.h b/pypy/module/cpyext/include/pystate.h --- a/pypy/module/cpyext/include/pystate.h +++ b/pypy/module/cpyext/include/pystate.h @@ -21,9 +21,8 @@ #define Py_END_ALLOW_THREADS PyEval_RestoreThread(_save); \ } -typedef - enum {PyGILState_LOCKED, PyGILState_UNLOCKED} - PyGILState_STATE; +enum {PyGILState_LOCKED, PyGILState_UNLOCKED}; +typedef int PyGILState_STATE; #define PyThreadState_GET() PyThreadState_Get() diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -208,16 +208,14 @@ # Before external call is after running Python rffi.aroundstate.before() -PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', - typedef='PyGILState_STATE', - compilation_info=CConfig._compilation_info_) +PyGILState_STATE = rffi.INT @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) def PyGILState_Ensure(space): if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return lltype.nullptr(PyGILState_STATE.TO) + return rffi.cast(PyGILState_STATE, 0) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -585,7 +585,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -219,7 +219,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -239,7 +238,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -652,7 +652,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -660,7 +659,6 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") 
import imp, sys, time oldpath = sys.path time.tzset = "" diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -173,7 +173,7 @@ import marshal types = (float, complex, int, tuple, list, dict, set, frozenset) for cls in types: - print cls + print(cls) class subtype(cls): pass exc = raises(ValueError, marshal.dumps, subtype) diff --git a/pypy/objspace/std/memoryview.py b/pypy/objspace/std/memoryview.py --- a/pypy/objspace/std/memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -41,14 +41,14 @@ self.buf = buf def buffer_w(self, space): - """Note that memoryview() is very inconsistent in CPython: it - does not support the buffer interface but does support the new - buffer interface: as a result, it is possible to pass memoryview - to e.g. socket.send() but not to file.write(). For simplicity - and consistency, in PyPy memoryview DOES support buffer(), which - means that it is accepted in more places than CPython. """ - self._check_released(space) + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. + """ return self.buf @staticmethod From noreply at buildbot.pypy.org Fri Mar 21 00:02:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Mar 2014 00:02:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140320230233.92BA41D2866@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70135:706e3a4c1ffa Date: 2014-03-20 15:41 -0700 http://bitbucket.org/pypy/pypy/changeset/706e3a4c1ffa/ Log: merge default diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -292,6 +292,10 @@ depending on the compiler settings, the default of 768KB is enough for about 1400 calls. +* since the implementation of dictionary is different, the exact number + which ``__hash__`` and ``__eq__`` are called is different. Since CPython + does not give any specific guarantees either, don't rely on it. + * assignment to ``__class__`` is limited to the cases where it works on CPython 2.5. On CPython 2.6 and 2.7 it works in a bit more cases, which are not supported by PyPy so far. (If needed, diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -114,3 +114,6 @@ app-level. The `Buffer` class is now used by `W_MemoryView` and `W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was an alias to `Buffer`, which was wrappable itself. + +.. 
branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -441,11 +441,10 @@ return name - def getbuiltinmodule(self, name, force_init=False, reuse=True): + def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: - assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -464,15 +463,7 @@ # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): - if not reuse and w_mod.startup_called: - # Create a copy of the module - w_mod.getdict(self) # unlazy w_initialdict - w_new = self.wrap(Module(self, w_name)) - self.call_method(w_new.getdict(self), 'update', - w_mod.w_initialdict) - w_mod = w_new - else: - w_mod.init(self) + w_mod.init(self) # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -585,8 +585,7 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True, - reuse=reuse) + return space.getbuiltinmodule(find_info.filename, force_init=True) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -219,6 +219,7 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 + skip("fix me") import sys, marshal old = marshal.loads @@ -238,6 +239,7 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -652,6 +652,7 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): + skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -660,6 +661,7 @@ def test_reimport_builtin(self): import imp, sys, time + skip("fix me") oldpath = sys.path time.tzset = "" diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -583,6 +583,10 @@ emit_op_getfield_raw_pure = emit_op_getfield_gc emit_op_getfield_gc_pure = emit_op_getfield_gc + def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): + # XXX implement me + return fcond + def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -849,6 +849,10 @@ prepare_op_getfield_raw_pure = prepare_op_getfield_gc prepare_op_getfield_gc_pure = prepare_op_getfield_gc + 
def prepare_op_increment_debug_counter(self, op, fcond): + # XXX implement me + return [] + def prepare_op_getinteriorfield_gc(self, op, fcond): t = unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = t diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -553,6 +553,10 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_increment_debug_counter(self, addr): + p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) + p[0] += 1 + def unpack_arraydescr_size(self, arraydescr): from rpython.jit.backend.llsupport.symbolic import get_array_token from rpython.jit.backend.llsupport.descr import get_type_flag, FLAG_SIGNED diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -15,7 +15,7 @@ DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', # 'b'ridge, 'l'abel or # 'e'ntry point - ('i', lltype.Signed), + ('i', lltype.Signed), # first field, at offset 0 ('type', lltype.Char), ('number', lltype.Signed) ) @@ -64,7 +64,6 @@ self.cpu = cpu self.memcpy_addr = 0 self.rtyper = cpu.rtyper - self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self._debug = False def setup_once(self): @@ -265,14 +264,8 @@ def _append_debugging_code(self, operations, tp, number, token): counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - operations.extend(ops) + operations.append( + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3736,7 +3736,7 @@ assert False, 'should not be called' from rpython.jit.codewriter.effectinfo import EffectInfo - effectinfo = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, EffectInfo.OS_MATH_SQRT) + effectinfo = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, EffectInfo.OS_MATH_SQRT) FPTR = self.Ptr(self.FuncType([lltype.Float], lltype.Float)) func_ptr = llhelper(FPTR, math_sqrt) FUNC = deref(FPTR) @@ -4338,3 +4338,12 @@ assert rffi.cast(lltype.Signed, a[0]) == -7654 assert rffi.cast(lltype.Signed, a[1]) == 777 lltype.free(a, flavor='raw') + + def test_increment_debug_counter(self): + foo = lltype.malloc(rffi.CArray(lltype.Signed), 1, flavor='raw') + foo[0] = 1789200 + self.execute_operation(rop.INCREMENT_DEBUG_COUNTER, + [ConstInt(rffi.cast(lltype.Signed, foo))], + 'void') + assert foo[0] == 1789201 + lltype.free(foo, flavor='raw') diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -287,7 +287,6 @@ cast_instance_to_gcref(self.cpu.propagate_exception_descr)) ofs = self.cpu.get_ofs_of_frame_field('jf_descr') self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) - self.mc.MOV_rr(eax.value, ebp.value) # 
self._call_footer() rawstart = self.mc.materialize(self.cpu.asmmemmgr, []) @@ -435,8 +434,8 @@ self.wb_slowpath[withcards + 2 * withfloats] = rawstart @rgc.no_release_gil - def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, - log): + def assemble_loop(self, inputargs, operations, looptoken, log, + loopname, logger): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -515,8 +514,8 @@ size_excluding_failure_stuff - looppos) @rgc.no_release_gil - def assemble_bridge(self, logger, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, faildescr, inputargs, operations, + original_loop_token, log, logger): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) @@ -761,6 +760,9 @@ # def _call_footer(self): + # the return value is the jitframe + self.mc.MOV_rr(eax.value, ebp.value) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) @@ -1467,6 +1469,14 @@ ofs_loc) self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + def genop_discard_increment_debug_counter(self, op, arglocs): + # The argument should be an immediate address. This should + # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a + # SETFIELD_RAW. Here we use the direct from-memory-to-memory + # increment operation of x86. + base_loc, = arglocs + self.mc.INC(mem(base_loc, 0)) + def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs assert isinstance(size_loc, ImmedLoc) @@ -1823,12 +1833,8 @@ # did just above. ofs = self.cpu.get_ofs_of_frame_field('jf_descr') ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.POP(eax) - mc.MOV_br(ofs2, eax.value) - mc.POP(eax) - mc.MOV_br(ofs, eax.value) - # the return value is the jitframe - mc.MOV_rr(eax.value, ebp.value) + mc.POP_b(ofs2) + mc.POP_b(ofs) self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -1861,7 +1867,6 @@ # keep that one and kill all the others ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') self.mc.MOV_bi(ofs, 0) - self.mc.MOV_rr(eax.value, ebp.value) # exit function self._call_footer() diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1003,6 +1003,10 @@ consider_getfield_raw_pure = consider_getfield_gc consider_getfield_gc_pure = consider_getfield_gc + def consider_increment_debug_counter(self, op): + base_loc = self.loc(op.getarg(0)) + self.perform_discard(op, [base_loc]) + def consider_getarrayitem_gc(self, op): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) args = op.getarglist() diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -488,12 +488,22 @@ for possible_code in unrolling_location_codes: if code == possible_code: val = getattr(loc, "value_" + possible_code)() - if self.WORD == 8 and possible_code == 'i' and not rx86.fits_in_32bits(val): - self._load_scratch(val) + # Faking out of certain operations for x86_64 + fits32 = rx86.fits_in_32bits + if possible_code == 'i' and not fits32(val): + self._load_scratch(val) # for 'PUSH(imm)' _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) - else: - methname = name + "_" + possible_code - 
_rx86_getattr(self, methname)(val) + return + if possible_code == 'j' and not fits32(val): + val = self._addr_as_reg_offset(val) + _rx86_getattr(self, name + "_m")(val) + return + if possible_code == 'm' and not fits32(val[1]): + val = self._fix_static_offset_64_m(val) + if possible_code == 'a' and not fits32(val[3]): + val = self._fix_static_offset_64_a(val) + methname = name + "_" + possible_code + _rx86_getattr(self, methname)(val) return func_with_new_name(INSN, "INSN_" + name) @@ -600,6 +610,7 @@ TEST8 = _binaryop('TEST8') BTS = _binaryop('BTS') + INC = _unaryop('INC') ADD = _binaryop('ADD') SUB = _binaryop('SUB') IMUL = _binaryop('IMUL') diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -93,16 +93,15 @@ def compile_loop(self, inputargs, operations, looptoken, log=True, name='', logger=None): - return self.assembler.assemble_loop(logger, name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(inputargs, operations, looptoken, log, + name, logger) def compile_bridge(self, faildescr, inputargs, operations, original_loop_token, log=True, logger=None): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(logger, faildescr, inputargs, - operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(faildescr, inputargs, operations, + original_loop_token, log, logger) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -470,6 +470,9 @@ # ------------------------------ Arithmetic ------------------------------ + INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) + INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1)) + ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0) OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_ = common_modes(1) AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_ = common_modes(4) diff --git a/rpython/jit/backend/x86/test/test_assembler.py b/rpython/jit/backend/x86/test/test_assembler.py --- a/rpython/jit/backend/x86/test/test_assembler.py +++ b/rpython/jit/backend/x86/test/test_assembler.py @@ -55,9 +55,7 @@ asm = cpu.assembler asm.setup_once() asm.setup(looptoken) - self.fm = X86FrameManager(0) - self.xrm = X86XMMRegisterManager(None, frame_manager=self.fm, - assembler=asm) + self.xrm = X86XMMRegisterManager(None, assembler=asm) callback(asm) asm.mc.RET() rawstart = asm.materialize_loop(looptoken) @@ -75,29 +73,6 @@ res = self.do_test(callback) assert res == 42 - def test_push_stack(self): - def callback(asm): - loc = self.fm.frame_pos(5, INT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(imm(42), loc) - asm.regalloc_push(loc) - asm.regalloc_pop(eax) - asm.mc.ADD_ri(esp.value, 64) - res = self.do_test(callback) - assert res == 42 - - def test_pop_stack(self): - def callback(asm): - loc = self.fm.frame_pos(5, INT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(imm(42), edx) - asm.regalloc_push(edx) - asm.regalloc_pop(loc) - asm.mov(loc, eax) - asm.mc.ADD_ri(esp.value, 64) - res = self.do_test(callback) - assert res == 42 - def test_simple_xmm(self): def callback(asm): c = ConstFloat(longlong.getfloatstorage(-42.5)) @@ -109,32 +84,8 @@ res = self.do_test(callback) assert res == -42 - def test_push_stack_xmm(self): + def test_xmm_pushes_8_bytes(self): def 
callback(asm): - c = ConstFloat(longlong.getfloatstorage(-42.5)) - loc = self.xrm.convert_to_imm(c) - loc2 = self.fm.frame_pos(4, FLOAT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(loc, xmm5) - asm.mov(xmm5, loc2) - asm.regalloc_push(loc2) - asm.regalloc_pop(xmm0) - asm.mc.ADD_ri(esp.value, 64) - asm.mc.CVTTSD2SI(eax, xmm0) - res = self.do_test(callback) - assert res == -42 - - def test_pop_stack_xmm(self): - def callback(asm): - c = ConstFloat(longlong.getfloatstorage(-42.5)) - loc = self.xrm.convert_to_imm(c) - loc2 = self.fm.frame_pos(4, FLOAT) - asm.mc.SUB_ri(esp.value, 64) - asm.mov(loc, xmm5) asm.regalloc_push(xmm5) - asm.regalloc_pop(loc2) - asm.mov(loc2, xmm0) - asm.mc.ADD_ri(esp.value, 64) - asm.mc.CVTTSD2SI(eax, xmm0) - res = self.do_test(callback) - assert res == -42 + asm.mc.ADD(esp, imm(8)) + self.do_test(callback) diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py --- a/rpython/jit/backend/x86/test/test_regloc.py +++ b/rpython/jit/backend/x86/test/test_regloc.py @@ -373,3 +373,56 @@ '\x59' ) assert cb.getvalue() == expected_instructions + + # ------------------------------------------------------------ + + def test_push_immed64(self): + immed = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.PUSH(imm(immed)) + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # push r11 + '\x41\x53' + ) + assert cb.getvalue() == expected_instructions + + def test_inc_64bit_address_1(self): + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr)) + # this case is a INC_j + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # inc [r11] + '\x49\xFF\x03' + ) + assert cb.getvalue() == expected_instructions + + def test_inc_64bit_address_2(self): + py.test.skip("there is no unary instruction INSN_a so far") + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(ImmedLoc(0), edx, 3, base_addr)) + # this case would be a INC_a + xxx + + def test_inc_64bit_address_3(self): + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(eax, ImmedLoc(0), 0, base_addr)) + # this case is a INC_m + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # lea r11, [rax+r11] + '\x4E\x8D\x1C\x18' + # inc [r11] + '\x49\xFF\x03' + ) + assert cb.getvalue() == expected_instructions diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -427,8 +427,8 @@ debug._log = None # assert ops_offset is looptoken._x86_ops_offset - # 2*(getfield_raw/int_add/setfield_raw) + ops + None - assert len(ops_offset) == 2*3 + len(operations) + 1 + # 2*increment_debug_counter + ops + None + assert len(ops_offset) == 2 + len(operations) + 1 assert (ops_offset[operations[0]] <= ops_offset[operations[1]] <= ops_offset[operations[2]] <= diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -178,7 +178,7 @@ return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescr=None): """Return the calldescr that describes all calls done by 'op'. 
This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. It gets an effectinfo @@ -259,6 +259,7 @@ effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, extraeffect, oopspecindex, can_invalidate, call_release_gil_target, + extradescr, ) # assert effectinfo is not None diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -21,6 +21,7 @@ OS_ARRAYCOPY = 1 # "list.ll_arraycopy" OS_STR2UNICODE = 2 # "str.str2unicode" OS_SHRINK_ARRAY = 3 # rgc.ll_shrink_array + OS_DICT_LOOKUP = 4 # ll_dict_lookup # OS_STR_CONCAT = 22 # "stroruni.concat" OS_STR_SLICE = 23 # "stroruni.slice" @@ -88,15 +89,18 @@ # for debugging: _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, - OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, + OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, OS_DICT_LOOKUP, ]) def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, + readonly_descrs_interiorfields, write_descrs_fields, write_descrs_arrays, + write_descrs_interiorfields, extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extradescrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), @@ -121,17 +125,21 @@ result = object.__new__(cls) result.readonly_descrs_fields = readonly_descrs_fields result.readonly_descrs_arrays = readonly_descrs_arrays + result.readonly_descrs_interiorfields = readonly_descrs_interiorfields if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: result.write_descrs_fields = [] result.write_descrs_arrays = [] + result.write_descrs_interiorfields = [] else: result.write_descrs_fields = write_descrs_fields result.write_descrs_arrays = write_descrs_arrays + result.write_descrs_interiorfields = write_descrs_interiorfields result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex + result.extradescrs = extradescrs result.call_release_gil_target = call_release_gil_target if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE @@ -163,7 +171,7 @@ return None return frozenset(x) -EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, +EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) @@ -172,19 +180,24 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extradescr=None): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None readonly_descrs_arrays = None + readonly_descrs_interiorfields = None write_descrs_fields = None write_descrs_arrays = None + write_descrs_interiorfields = None extraeffect = EffectInfo.EF_RANDOM_EFFECTS else: readonly_descrs_fields = [] readonly_descrs_arrays = [] + readonly_descrs_interiorfields = [] write_descrs_fields = [] write_descrs_arrays = [] + write_descrs_interiorfields = [] def add_struct(descrs_fields, (_, T, fieldname)): T = deref(T) @@ -198,6 +211,17 @@ descr = cpu.arraydescrof(ARRAY) 
descrs_arrays.append(descr) + def add_interiorfield(descrs_interiorfields, (_, T, fieldname)): + T = deref(T) + if not isinstance(T, lltype.Array): + return # let's not consider structs for now + if not consider_array(T): + return + if getattr(T.OF, fieldname) is lltype.Void: + return + descr = cpu.interiorfielddescrof(T, fieldname) + descrs_interiorfields.append(descr) + for tup in effects: if tup[0] == "struct": add_struct(write_descrs_fields, tup) @@ -205,6 +229,12 @@ tupw = ("struct",) + tup[1:] if tupw not in effects: add_struct(readonly_descrs_fields, tup) + elif tup[0] == "interiorfield": + add_interiorfield(write_descrs_interiorfields, tup) + elif tup[0] == "readinteriorfield": + tupw = ('interiorfield',) + tup[1:] + if tupw not in effects: + add_interiorfield(readonly_descrs_interiorfields, tup) elif tup[0] == "array": add_array(write_descrs_arrays, tup) elif tup[0] == "readarray": @@ -216,12 +246,15 @@ # return EffectInfo(readonly_descrs_fields, readonly_descrs_arrays, + readonly_descrs_interiorfields, write_descrs_fields, write_descrs_arrays, + write_descrs_interiorfields, extraeffect, oopspecindex, can_invalidate, - call_release_gil_target) + call_release_gil_target, + extradescr) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -403,6 +403,9 @@ prepare = self._handle_math_sqrt_call elif oopspec_name.startswith('rgc.'): prepare = self._handle_rgc_call + elif oopspec_name.endswith('dict.lookup'): + # also ordereddict.lookup + prepare = self._handle_dict_lookup_call else: prepare = self.prepare_builtin_call try: @@ -1680,9 +1683,11 @@ # ---------- # Strings and Unicodes. 
- def _handle_oopspec_call(self, op, args, oopspecindex, extraeffect=None): + def _handle_oopspec_call(self, op, args, oopspecindex, extraeffect=None, + extradescr=None): calldescr = self.callcontrol.getcalldescr(op, oopspecindex, - extraeffect) + extraeffect, + extradescr=extradescr) if extraeffect is not None: assert (is_test_calldescr(calldescr) # for tests or calldescr.get_extra_info().extraeffect == extraeffect) @@ -1846,6 +1851,14 @@ return self._handle_oopspec_call(op, args, EffectInfo.OS_MATH_SQRT, EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + def _handle_dict_lookup_call(self, op, oopspec_name, args): + extradescr1 = self.cpu.fielddescrof(op.args[1].concretetype.TO, + 'entries') + extradescr2 = self.cpu.interiorfielddescrof( + op.args[1].concretetype.TO.entries.TO, 'key') + return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, + extradescr=[extradescr1, extradescr2]) + def _handle_rgc_call(self, op, oopspec_name, args): if oopspec_name == 'rgc.ll_shrink_array': return self._handle_oopspec_call(op, args, EffectInfo.OS_SHRINK_ARRAY, EffectInfo.EF_CAN_RAISE) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -60,7 +60,8 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, + extradescr=None): return 'calldescr' def calldescr_canraise(self, calldescr): return True @@ -117,7 +118,8 @@ self.callinfocollection = FakeCallInfoCollection() def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, + extradescr=None): assert oopspecindex is not None # in this test EI = effectinfo.EffectInfo if oopspecindex != EI.OS_ARRAYCOPY: diff --git a/rpython/jit/codewriter/test/test_list.py b/rpython/jit/codewriter/test/test_list.py --- a/rpython/jit/codewriter/test/test_list.py +++ b/rpython/jit/codewriter/test/test_list.py @@ -37,7 +37,8 @@ class FakeCallControl: class getcalldescr(AbstractDescr): - def __init__(self, op, oopspecindex=0, extraeffect=None): + def __init__(self, op, oopspecindex=0, extraeffect=None, + extradescr=None): self.op = op self.oopspecindex = oopspecindex def __repr__(self): diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -332,6 +332,7 @@ continue if value in (rop.FORCE_TOKEN, rop.CALL_ASSEMBLER, + rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, rop.DEBUG_MERGE_POINT, diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -1,8 +1,10 @@ import os +from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.history import Const from rpython.jit.metainterp.jitexc import JitException -from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS +from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED from rpython.jit.metainterp.optimizeopt.util import 
make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.objectmodel import we_are_translated @@ -173,6 +175,10 @@ self.cached_fields = {} # cached array items: {array descr: {index: CachedField}} self.cached_arrayitems = {} + # cached dict items: {dict descr: {(optval, index): box-or-const}} + self.cached_dict_reads = {} + # cache of corresponding array descrs + self.corresponding_array_descrs = {} # self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False @@ -180,9 +186,13 @@ self.postponed_op = None def force_at_end_of_preamble(self): + self.cached_dict_reads.clear() + self.corresponding_array_descrs.clear() self.force_all_lazy_setfields_and_arrayitems() def flush(self): + self.cached_dict_reads.clear() + self.corresponding_array_descrs.clear() self.force_all_lazy_setfields_and_arrayitems() self.emit_postponed_op() @@ -214,6 +224,7 @@ del self._lazy_setfields_and_arrayitems[:] self.cached_fields.clear() self.cached_arrayitems.clear() + self.cached_dict_reads.clear() def field_cache(self, descr): try: @@ -282,6 +293,44 @@ self.force_all_lazy_setfields_and_arrayitems() self.clean_caches() + def optimize_CALL(self, op): + # dispatch based on 'oopspecindex' to a method that handles + # specifically the given oopspec call. For non-oopspec calls, + # oopspecindex is just zero. + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_DICT_LOOKUP: + if self._optimize_CALL_DICT_LOOKUP(op): + return + self.emit_operation(op) + + def _optimize_CALL_DICT_LOOKUP(self, op): + descrs = op.getdescr().get_extra_info().extradescrs + assert descrs # translation hint + descr1 = descrs[0] + try: + d = self.cached_dict_reads[descr1] + except KeyError: + d = self.cached_dict_reads[descr1] = args_dict() + self.corresponding_array_descrs[descrs[1]] = descr1 + args = self.optimizer.make_args_key(op) + try: + res_v = d[args] + except KeyError: + d[args] = self.getvalue(op.result) + return False + else: + self.make_equal_to(op.result, res_v) + self.last_emitted_operation = REMOVED + return True + + def optimize_GUARD_NO_EXCEPTION(self, op): + if self.last_emitted_operation is REMOVED: + return + self.emit_operation(op) + + optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION + def force_from_effectinfo(self, effectinfo): # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large @@ -290,9 +339,20 @@ for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: + try: + del self.cached_dict_reads[fielddescr] + except KeyError: + pass self.force_lazy_setfield(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) + for descr in effectinfo.write_descrs_interiorfields: + if descr in self.corresponding_array_descrs: + dictdescr = self.corresponding_array_descrs.pop(descr) + try: + del self.cached_dict_reads[dictdescr] + except KeyError: + pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info self.force_lazy_setfield(vrefinfo.descr_forced) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5444,6 +5444,21 @@ """ self.optimize_loop(ops, expected) + def test_consecutive_getinteriorfields(self): + py.test.skip("we want this to pass") + ops = """ + [p0, i0] + i1 = getinteriorfield_gc(p0, i0, descr=valuedescr) + i2 = getinteriorfield_gc(p0, i0, descr=valuedescr) + jump(i1, i2) + """ + expected = """ + [p0, i0] + i1 = getinteriorfield_gc(p0, i0, descr=valuedescr) + jump(i1, i1) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -181,28 +181,29 @@ plaincalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [])) + EffectInfo([], [], [], [], [], [])) writeadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [])) + EffectInfo([], [], [], [adescr], [], [])) writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [adescr], [arraydescr])) + EffectInfo([], [], [], [adescr], [arraydescr], + [])) readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([adescr], [], [], [])) + EffectInfo([adescr], [], [], [], [], [])) mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([nextdescr], [], [], [], + EffectInfo([nextdescr], [], [], [], [], [], EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE, can_invalidate=True)) arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [arraydescr], [], [arraydescr], + EffectInfo([], [arraydescr], [], [], [arraydescr], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_ARRAYCOPY)) raw_malloc_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR)) raw_free_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_RAW_FREE)) @@ -251,17 +252,18 @@ _oopspecindex = getattr(EffectInfo, _os) locals()[_name] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=_oopspecindex)) # _oopspecindex = getattr(EffectInfo, _os.replace('STR', 'UNI')) locals()[_name.replace('str', 'unicode')] = \ cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=_oopspecindex)) s2u_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], oopspecindex=EffectInfo.OS_STR2UNICODE)) + EffectInfo([], [], [], [], [], [], + oopspecindex=EffectInfo.OS_STR2UNICODE)) # class LoopToken(AbstractDescr): @@ -277,7 +279,7 @@ virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced FUNC = lltype.FuncType([], lltype.Void) - ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, can_invalidate=False, oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) clear_vable = 
cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- + 'INCREMENT_DEBUG_COUNTER/1', 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -193,6 +193,107 @@ self.check_simple_loop({'int_sub': 1, 'int_gt': 1, 'guard_true': 1, 'jump': 1}) + def test_dict_two_lookups(self): + driver = JitDriver(greens = [], reds = 'auto') + d = {'a': 3, 'b': 4} + indexes = ['a', 'b'] + + def f(n): + s = 0 + while n > 0: + driver.jit_merge_point() + s += d[indexes[n & 1]] + s += d[indexes[n & 1]] + n -= 1 + return s + + self.meta_interp(f, [10]) + # XXX should be one getinteriorfield_gc + self.check_simple_loop(call=1, getinteriorfield_gc=2, + guard_no_exception=1) + + def test_ordered_dict_two_lookups(self): + driver = JitDriver(greens = [], reds = 'auto') + d = OrderedDict() + d['a'] = 3 + d['b'] = 4 + indexes = ['a', 'b'] + + def f(n): + s = 0 + while n > 0: + driver.jit_merge_point() + s += d[indexes[n & 1]] + s += d[indexes[n & 1]] + n -= 1 + return s + + self.meta_interp(f, [10]) + # XXX should be one getinteriorfield_gc + self.check_simple_loop(call=1, getinteriorfield_gc=2, + guard_no_exception=1) + + def test_dict_insert_invalidates_caches(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + d['aa'] += 1 # this will invalidate the index + s += d[index] + n -= 1 + return s + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_simple_loop(call=5) + + def test_dict_array_write_invalidates_caches(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + del d['cc'] + s += d[index] + d['cc'] = 3 + n -= 1 + return s + + exp = f(10) + res = self.meta_interp(f, [10]) + assert res == exp + self.check_simple_loop(call=7) + + def test_dict_double_lookup_2(self): + driver = JitDriver(greens = [], reds = 'auto') + indexes = ['aa', 'b', 'cc'] + + def f(n): + d = {'aa': 3, 'b': 4, 'cc': 5} + s = 0 + while n > 0: + driver.jit_merge_point() + index = indexes[n & 1] + s += d[index] + d[index] += 1 + n -= 1 + return s + + res = self.meta_interp(f, [10]) + assert res == f(10) + self.check_simple_loop(call=3) + class TestLLtype(DictTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -302,7 +302,7 @@ self.clear_vable_ptr = self.warmrunnerdesc.helper_func( FUNCPTR, self.clear_vable_token) FUNC = FUNCPTR.TO - ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, can_invalidate=False, oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- 
a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -14,6 +14,11 @@ includes = ['stdio.h', 'sys/types.h'] if os.name == "posix": includes += ['unistd.h'] + ftruncate = 'ftruncate' + fileno = 'fileno' +else: + ftruncate = '_chsize' + fileno = '_fileno' eci = ExternalCompilationInfo(includes=includes) def llexternal(*args, **kwargs): @@ -41,10 +46,10 @@ c_fseek = llexternal('fseek', [lltype.Ptr(FILE), rffi.LONG, rffi.INT], rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) -c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) +c_fileno = llexternal(fileno, [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) -c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) +c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -569,6 +569,7 @@ PERTURB_SHIFT = 5 @jit.look_inside_iff(lambda d, key, hash: jit.isvirtual(d) and jit.isconstant(key)) + at jit.oopspec('dict.lookup(d, key, hash)') def ll_dict_lookup(d, key, hash): entries = d.entries ENTRIES = lltype.typeOf(entries).TO diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -709,6 +709,7 @@ @jit.look_inside_iff(lambda d, key, hash, store_flag, T: jit.isvirtual(d) and jit.isconstant(key)) + at jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)') def ll_dict_lookup(d, key, hash, store_flag, T): INDEXES = _ll_ptr_to_array_of(T) entries = d.entries diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -353,3 +353,23 @@ result = wa.analyze(fgraph.startblock.operations[-1]) assert list(result) == [("struct", lltype.Ptr(S), "x")] + + def test_interiorfield(self): + A = lltype.GcArray(lltype.Struct('x', ('x', lltype.Signed), + ('y', lltype.Signed))) + + def g(x): + a = lltype.malloc(A, 1) + a[0].y = 3 + return f(a, x) + + def f(a, x): + a[0].x = x + return a[0].y + + t, wa = self.translate(g, [int]) + ggraph = graphof(t, g) + result = wa.analyze(ggraph.startblock.operations[-1]) + res = list(result) + assert ('readinteriorfield', lltype.Ptr(A), 'y') in res + assert ('interiorfield', lltype.Ptr(A), 'x') in res diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import Variable +from rpython.flowspace.model import Variable, Constant from rpython.translator.backendopt import graphanalyze top_set = object() @@ -37,6 +37,12 @@ return top_set return result1.union(result2) + def _getinteriorname(self, op): + if (isinstance(op.args[1], Constant) and + isinstance(op.args[1].value, str)): + return op.args[1].value + return op.args[2].value + def analyze_simple_operation(self, op, graphinfo): if op.opname == "setfield": if graphinfo is None or not 
graphinfo.is_fresh_malloc(op.args[0]): @@ -45,11 +51,18 @@ elif op.opname == "setarrayitem": if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): return self._array_result(op.args[0].concretetype) + elif op.opname == "setinteriorfield": + if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): + name = self._getinteriorname(op) + return self._interiorfield_result(op.args[0].concretetype, name) return empty_set def _array_result(self, TYPE): return frozenset([("array", TYPE)]) + def _interiorfield_result(self, TYPE, fieldname): + return frozenset([("interiorfield", TYPE, fieldname)]) + def compute_graph_info(self, graph): return FreshMallocs(graph) @@ -99,4 +112,8 @@ elif op.opname == "getarrayitem": return frozenset([ ("readarray", op.args[0].concretetype)]) + elif op.opname == "getinteriorfield": + name = self._getinteriorname(op) + return frozenset([("readinteriorfield", op.args[0].concretetype, + name)]) return WriteAnalyzer.analyze_simple_operation(self, op, graphinfo) From noreply at buildbot.pypy.org Fri Mar 21 00:02:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Mar 2014 00:02:34 +0100 (CET) Subject: [pypy-commit] pypy py3k: skip for now: requires the not NotImplemented audioop.lin2ulaw Message-ID: <20140320230234.EE87F1D2866@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70136:eed6c50a88fc Date: 2014-03-20 16:00 -0700 http://bitbucket.org/pypy/pypy/changeset/eed6c50a88fc/ Log: skip for now: requires the not NotImplemented audioop.lin2ulaw diff --git a/lib-python/3/test/test_sunau.py b/lib-python/3/test/test_sunau.py --- a/lib-python/3/test/test_sunau.py +++ b/lib-python/3/test/test_sunau.py @@ -1,4 +1,4 @@ -from test.support import run_unittest, TESTFN +from test.support import run_unittest, TESTFN, impl_detail import unittest import os @@ -41,6 +41,7 @@ self.assertEqual(self.f.readframes(nframes), output) self.f.close() + @impl_detail(pypy=False) def test_ulaw(self): self.f = sunau.open(TESTFN, 'w') self.f.setnchannels(nchannels) From noreply at buildbot.pypy.org Fri Mar 21 00:02:36 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Mar 2014 00:02:36 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to buffer refactoring Message-ID: <20140320230236.363E21D2866@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70137:f890d933e737 Date: 2014-03-20 16:00 -0700 http://bitbucket.org/pypy/pypy/changeset/f890d933e737/ Log: adapt to buffer refactoring diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -136,7 +136,7 @@ return self.value def __buffer__(self): - return self._buffer.__buffer__() + return memoryview(self._buffer) def _get_b_base(self): try: From noreply at buildbot.pypy.org Fri Mar 21 00:02:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Mar 2014 00:02:37 +0100 (CET) Subject: [pypy-commit] pypy py3k: add a TODO comment Message-ID: <20140320230237.9B7451D2866@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70138:5f6d9d6505b2 Date: 2014-03-20 16:01 -0700 http://bitbucket.org/pypy/pypy/changeset/5f6d9d6505b2/ Log: add a TODO comment diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -781,6 +781,8 @@ return space.newlist(w_type.get_subclasses()) def descr___prepare__(space, __args__): + # XXX: space.newdict(strdict=True)? 
(XXX: which should be + # UnicodeDictStrategy but is currently BytesDictStrategy) return space.newdict() # ____________________________________________________________ From noreply at buildbot.pypy.org Fri Mar 21 01:34:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Mar 2014 01:34:35 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_flatten/test_longlong on 32bit Message-ID: <20140321003435.52DB41C02AF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70139:d5dd9d0ba5c4 Date: 2014-03-21 00:33 +0000 http://bitbucket.org/pypy/pypy/changeset/d5dd9d0ba5c4/ Log: fix test_flatten/test_longlong on 32bit diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -73,7 +73,7 @@ def guess_call_kind(self, op): return 'residual' def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescr=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): diff --git a/rpython/jit/codewriter/test/test_longlong.py b/rpython/jit/codewriter/test/test_longlong.py --- a/rpython/jit/codewriter/test/test_longlong.py +++ b/rpython/jit/codewriter/test/test_longlong.py @@ -17,7 +17,7 @@ class FakeBuiltinCallControl: def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, extradescr=None): assert oopspecindex is not None # in this test return 'calldescr-%d' % oopspecindex def calldescr_canraise(self, calldescr): From noreply at buildbot.pypy.org Fri Mar 21 07:55:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Mar 2014 07:55:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Un-comment-out the implementation of stm_ignored, even if right now it's not used Message-ID: <20140321065552.66EF91C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70140:5e13c2ff77d6 Date: 2014-03-21 07:48 +0100 http://bitbucket.org/pypy/pypy/changeset/5e13c2ff77d6/ Log: Un-comment-out the implementation of stm_ignored, even if right now it's not used diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -53,13 +53,13 @@ "unbalanced stm_ignore_start/stm_ignore_stop in block") def gct_stm_ignored_start(self, hop): - #assert not self.in_stm_ignored - #self.in_stm_ignored = True + assert not self.in_stm_ignored + self.in_stm_ignored = True self.default(hop) def gct_stm_ignored_stop(self, hop): - #assert self.in_stm_ignored - #self.in_stm_ignored = False + assert self.in_stm_ignored + self.in_stm_ignored = False self.default(hop) def var_needs_set_transform(self, var): diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -76,11 +76,11 @@ [op.args[0]], v_none)) transformer.read_barrier_counts += 1 elif op.opname == 'stm_ignored_start': - pass #assert stm_ignored == False - #stm_ignored = True + assert stm_ignored == False + stm_ignored = True elif op.opname == 'stm_ignored_stop': - pass #assert stm_ignored == True - #stm_ignored = False + assert stm_ignored == True + stm_ignored = False newops.append(op) assert stm_ignored == 
False block.operations = newops From noreply at buildbot.pypy.org Fri Mar 21 07:55:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Mar 2014 07:55:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Skip or remove the remaining tests Message-ID: <20140321065553.CF4321C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70141:53a2a9ecde3e Date: 2014-03-21 07:54 +0100 http://bitbucket.org/pypy/pypy/changeset/53a2a9ecde3e/ Log: Skip or remove the remaining tests diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -1,3 +1,4 @@ +import py from rpython.rlib import rstm, rgc, objectmodel from rpython.rlib.debug import debug_print from rpython.rtyper.lltypesystem import lltype, rffi @@ -190,6 +191,7 @@ assert '12\n12\n' in data, "got: %r" % (data,) def test_prebuilt_nongc(self): + py.test.skip("stmframework: GC pointer written into a non-GC location") def check(foobar, retry_counter): return 0 # do nothing from rpython.rtyper.lltypesystem import lltype @@ -380,24 +382,6 @@ assert match assert int(match.group(1)) < 20 - def test_gc_writebarrier(self): - class X(object): - pass - prebuilt = X() - prebuilt.foo = 42 - - def main(argv): - llop.gc_writebarrier(lltype.Void, prebuilt) - debug_print(objectmodel.current_object_addr_as_int(prebuilt)) - prebuilt.foo = 43 - debug_print(objectmodel.current_object_addr_as_int(prebuilt)) - return 0 - - t, cbuilder = self.compile(main) - data, dataerr = cbuilder.cmdexec('', err=True) - lines = dataerr.split('\n') - assert lines[0] == lines[1] - def test_dtoa(self): def main(argv): a = len(argv) * 0.2 From noreply at buildbot.pypy.org Fri Mar 21 09:40:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Mar 2014 09:40:22 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Remove this bogus setting; always use 10000 as the default for Message-ID: <20140321084022.B912B1C02CF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70142:b5920bc6dd6c Date: 2014-03-21 09:39 +0100 http://bitbucket.org/pypy/pypy/changeset/b5920bc6dd6c/ Log: Remove this bogus setting; always use 10000 as the default for sys.setcheckinterval(). Use floating-point fractions internally. diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -63,10 +63,6 @@ """NOT_RPYTHON: set up a mechanism to send to the C code the value set by space.actionflag.setcheckinterval().""" # - # Set the default checkinterval to 200000, found by exploration to - # be a good default value. 
XXX do some more in-depth tests - space.actionflag.setcheckinterval(200000) - # def setcheckinterval_callback(): self.configure_transaction_length(space) # @@ -110,7 +106,7 @@ def configure_transaction_length(self, space): if self.threads_running: interval = space.actionflag.getcheckinterval() - rstm.set_transaction_length(interval) + rstm.set_transaction_length(interval / 10000.0) # ____________________________________________________________ diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -61,8 +61,8 @@ llop.stm_should_break_transaction(lltype.Bool)) @dont_look_inside -def set_transaction_length(length): - llop.stm_set_transaction_length(lltype.Void, length) +def set_transaction_length(fraction): + llop.stm_set_transaction_length(lltype.Void, float(fraction)) @dont_look_inside def increment_atomic(): diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -35,11 +35,11 @@ static long pypy_transaction_length; -void pypy_stm_set_transaction_length(long percentage) +void pypy_stm_set_transaction_length(double fraction) { /* the value '100' means 'use the default'. Other values are interpreted proportionally, up to some maximum. */ - long low_fill_mark = LOW_FILL_MARK * percentage / 100; + long low_fill_mark = (long)(LOW_FILL_MARK * fraction); if (low_fill_mark > NURSERY_SIZE / 2) low_fill_mark = NURSERY_SIZE / 2; pypy_transaction_length = low_fill_mark; @@ -50,7 +50,7 @@ stm_setup(); pypy_stm_register_thread_local(); pypy_stm_ready_atomic = 1; - pypy_stm_set_transaction_length(100); + pypy_stm_set_transaction_length(1.0); pypy_stm_start_inevitable_if_not_atomic(); } diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -46,7 +46,7 @@ } long pypy_stm_enter_callback_call(void); void pypy_stm_leave_callback_call(long); -void pypy_stm_set_transaction_length(long); +void pypy_stm_set_transaction_length(double); void pypy_stm_perform_transaction(object_t *, int(object_t *, int)); static inline int pypy_stm_should_break_transaction(void) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -98,7 +98,7 @@ def test_set_transaction_length(self): def entry_point(argv): - rstm.set_transaction_length(123) + rstm.set_transaction_length(0.123) return 0 t, cbuilder = self.compile(entry_point) cbuilder.cmdexec('') From noreply at buildbot.pypy.org Fri Mar 21 11:20:18 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 21 Mar 2014 11:20:18 +0100 (CET) Subject: [pypy-commit] pypy default: add more debugging info for InvalidLoops Message-ID: <20140321102019.010BE1C02EA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70143:ca27e8ab9f84 Date: 2014-03-21 12:18 +0200 http://bitbucket.org/pypy/pypy/changeset/ca27e8ab9f84/ Log: add more debugging info for InvalidLoops diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -242,8 +242,9 @@ box = value.box assert isinstance(box, Const) if not 
box.same_constant(constbox): - raise InvalidLoop('A GUARD_{VALUE,TRUE,FALSE} was proven to' + - 'always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_{VALUE,TRUE,FALSE} (%s) was proven ' + 'to always fail' % r) return if emit_operation: self.emit_operation(op) @@ -255,7 +256,9 @@ if value.is_null(): return elif value.is_nonnull(): - raise InvalidLoop('A GUARD_ISNULL was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always fail' + % r) self.emit_operation(op) value.make_constant(self.optimizer.cpu.ts.CONST_NULL) @@ -264,7 +267,9 @@ if value.is_nonnull(): return elif value.is_null(): - raise InvalidLoop('A GUARD_NONNULL was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always fail' + % r) self.emit_operation(op) value.make_nonnull(op) @@ -292,7 +297,8 @@ assert previous_classbox is not None assert expected_classbox is not None if not previous_classbox.same_constant(expected_classbox): - raise InvalidLoop('A GUARD_VALUE was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_VALUE (%s) was proven to always fail' % r) op = old_guard_op.copy_and_change(rop.GUARD_VALUE, args = [old_guard_op.getarg(0), op.getarg(1)]) self.optimizer.replaces_guard[op] = old_guard_op @@ -333,7 +339,9 @@ if realclassbox is not None: if realclassbox.same_constant(expectedclassbox): return - raise InvalidLoop('A GUARD_CLASS was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_CLASS (%s) was proven to always fail' + % r) if value.last_guard: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. 
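
The hunks in this changeset all apply the same small recipe: before raising InvalidLoop, the guard being rejected is rendered through the metainterp's operation logger and that text is embedded in the message, so a "proven to always fail" report names the concrete operation rather than only its kind. A minimal, self-contained sketch of the idea follows; InvalidLoop, FakeOp and format_resop here are illustrative stand-ins, not the real PyPy classes.

    # Sketch only: carry a printable form of the failing guard in the exception.
    class InvalidLoop(Exception):
        pass

    class FakeOp(object):
        def __init__(self, opname, args):
            self.opname = opname
            self.args = args

    def format_resop(op):
        # plays the role of logger_ops.repr_of_resop() in the hunks above
        return "%s(%s)" % (op.opname, ", ".join(str(a) for a in op.args))

    def reject_guard(op):
        r = format_resop(op)
        raise InvalidLoop('A %s (%s) was proven to always fail' % (op.opname, r))

    try:
        reject_guard(FakeOp("GUARD_VALUE", ["p1", 42]))
    except InvalidLoop as e:
        print(e)
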
@@ -356,8 +364,9 @@ def optimize_GUARD_NONNULL_CLASS(self, op): value = self.getvalue(op.getarg(0)) if value.is_null(): - raise InvalidLoop('A GUARD_NONNULL_CLASS was proven to always ' + - 'fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_NONNULL_CLASS (%s) was proven to ' + 'always fail' % r) self.optimize_GUARD_CLASS(op) def optimize_CALL_LOOPINVARIANT(self, op): From noreply at buildbot.pypy.org Fri Mar 21 11:20:21 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 21 Mar 2014 11:20:21 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140321102021.207161C02EA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70144:a4dca0f2cdfe Date: 2014-03-21 12:19 +0200 http://bitbucket.org/pypy/pypy/changeset/a4dca0f2cdfe/ Log: merge diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -440,11 +440,10 @@ return name - def getbuiltinmodule(self, name, force_init=False, reuse=True): + def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: - assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -463,15 +462,7 @@ # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): - if not reuse and w_mod.startup_called: - # Create a copy of the module - w_mod.getdict(self) # unlazy w_initialdict - w_new = self.wrap(Module(self, w_name)) - self.call_method(w_new.getdict(self), 'update', - w_mod.w_initialdict) - w_mod = w_new - else: - w_mod.init(self) + w_mod.init(self) # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -8,6 +8,8 @@ interpleveldefs = { 'DEFAULT_BUFFER_SIZE': 'space.wrap(interp_iobase.DEFAULT_BUFFER_SIZE)', 'BlockingIOError': 'interp_io.W_BlockingIOError', + 'UnsupportedOperation': + 'space.fromcache(interp_io.Cache).w_unsupportedoperation', '_IOBase': 'interp_iobase.W_IOBase', '_RawIOBase': 'interp_iobase.W_RawIOBase', '_BufferedIOBase': 'interp_bufferedio.W_BufferedIOBase', @@ -26,16 +28,6 @@ 'IncrementalNewlineDecoder': 'interp_textio.W_IncrementalNewlineDecoder', } - def init(self, space): - MixedModule.init(self, space) - w_UnsupportedOperation = space.call_function( - space.w_type, - space.wrap('UnsupportedOperation'), - space.newtuple([space.w_ValueError, space.w_IOError]), - space.newdict()) - space.setattr(self, space.wrap('UnsupportedOperation'), - w_UnsupportedOperation) - def shutdown(self, space): # at shutdown, flush all open streams. Ignore I/O errors. 
from pypy.module._io.interp_iobase import get_autoflusher diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -10,6 +10,12 @@ from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES +class Cache: + def __init__(self, space): + self.w_unsupportedoperation = space.new_exception_class( + "io.UnsupportedOperation", + space.newtuple([space.w_ValueError, space.w_IOError])) + class W_BlockingIOError(W_IOError): def __init__(self, space): W_IOError.__init__(self, space) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -579,8 +579,7 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True, - reuse=reuse) + return space.getbuiltinmodule(find_info.filename, force_init=True) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,6 +203,7 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 + skip("fix me") import sys, marshal old = marshal.loads @@ -222,6 +223,7 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,6 +578,7 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): + skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -585,6 +586,7 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): + skip("fix me") import sys, time oldpath = sys.path time.tzset = "" diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -583,6 +583,10 @@ emit_op_getfield_raw_pure = emit_op_getfield_gc emit_op_getfield_gc_pure = emit_op_getfield_gc + def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): + # XXX implement me + return fcond + def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -849,6 +849,10 @@ prepare_op_getfield_raw_pure = prepare_op_getfield_gc prepare_op_getfield_gc_pure = prepare_op_getfield_gc + def prepare_op_increment_debug_counter(self, op, fcond): + # XXX implement me + return [] + def prepare_op_getinteriorfield_gc(self, op, fcond): t = unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = t diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -553,6 +553,10 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def 
bh_increment_debug_counter(self, addr): + p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) + p[0] += 1 + def unpack_arraydescr_size(self, arraydescr): from rpython.jit.backend.llsupport.symbolic import get_array_token from rpython.jit.backend.llsupport.descr import get_type_flag, FLAG_SIGNED diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -15,7 +15,7 @@ DEBUG_COUNTER = lltype.Struct('DEBUG_COUNTER', # 'b'ridge, 'l'abel or # 'e'ntry point - ('i', lltype.Signed), + ('i', lltype.Signed), # first field, at offset 0 ('type', lltype.Char), ('number', lltype.Signed) ) @@ -64,7 +64,6 @@ self.cpu = cpu self.memcpy_addr = 0 self.rtyper = cpu.rtyper - self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i') self._debug = False def setup_once(self): @@ -265,14 +264,8 @@ def _append_debugging_code(self, operations, tp, number, token): counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) - box = BoxInt() - box2 = BoxInt() - ops = [ResOperation(rop.GETFIELD_RAW, [c_adr], - box, descr=self.debug_counter_descr), - ResOperation(rop.INT_ADD, [box, ConstInt(1)], box2), - ResOperation(rop.SETFIELD_RAW, [c_adr, box2], - None, descr=self.debug_counter_descr)] - operations.extend(ops) + operations.append( + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4338,3 +4338,12 @@ assert rffi.cast(lltype.Signed, a[0]) == -7654 assert rffi.cast(lltype.Signed, a[1]) == 777 lltype.free(a, flavor='raw') + + def test_increment_debug_counter(self): + foo = lltype.malloc(rffi.CArray(lltype.Signed), 1, flavor='raw') + foo[0] = 1789200 + self.execute_operation(rop.INCREMENT_DEBUG_COUNTER, + [ConstInt(rffi.cast(lltype.Signed, foo))], + 'void') + assert foo[0] == 1789201 + lltype.free(foo, flavor='raw') diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -434,8 +434,8 @@ self.wb_slowpath[withcards + 2 * withfloats] = rawstart @rgc.no_release_gil - def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, - log): + def assemble_loop(self, inputargs, operations, looptoken, log, + loopname, logger): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -514,8 +514,8 @@ size_excluding_failure_stuff - looppos) @rgc.no_release_gil - def assemble_bridge(self, logger, faildescr, inputargs, operations, - original_loop_token, log): + def assemble_bridge(self, faildescr, inputargs, operations, + original_loop_token, log, logger): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) @@ -1469,6 +1469,14 @@ ofs_loc) self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + def genop_discard_increment_debug_counter(self, op, arglocs): + # The argument should be an immediate address. This should + # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a + # SETFIELD_RAW. 
Here we use the direct from-memory-to-memory + # increment operation of x86. + base_loc, = arglocs + self.mc.INC(mem(base_loc, 0)) + def genop_discard_setfield_gc(self, op, arglocs): base_loc, ofs_loc, size_loc, value_loc = arglocs assert isinstance(size_loc, ImmedLoc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1003,6 +1003,10 @@ consider_getfield_raw_pure = consider_getfield_gc consider_getfield_gc_pure = consider_getfield_gc + def consider_increment_debug_counter(self, op): + base_loc = self.loc(op.getarg(0)) + self.perform_discard(op, [base_loc]) + def consider_getarrayitem_gc(self, op): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) args = op.getarglist() diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -488,12 +488,22 @@ for possible_code in unrolling_location_codes: if code == possible_code: val = getattr(loc, "value_" + possible_code)() - if self.WORD == 8 and possible_code == 'i' and not rx86.fits_in_32bits(val): - self._load_scratch(val) + # Faking out of certain operations for x86_64 + fits32 = rx86.fits_in_32bits + if possible_code == 'i' and not fits32(val): + self._load_scratch(val) # for 'PUSH(imm)' _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) - else: - methname = name + "_" + possible_code - _rx86_getattr(self, methname)(val) + return + if possible_code == 'j' and not fits32(val): + val = self._addr_as_reg_offset(val) + _rx86_getattr(self, name + "_m")(val) + return + if possible_code == 'm' and not fits32(val[1]): + val = self._fix_static_offset_64_m(val) + if possible_code == 'a' and not fits32(val[3]): + val = self._fix_static_offset_64_a(val) + methname = name + "_" + possible_code + _rx86_getattr(self, methname)(val) return func_with_new_name(INSN, "INSN_" + name) @@ -600,6 +610,7 @@ TEST8 = _binaryop('TEST8') BTS = _binaryop('BTS') + INC = _unaryop('INC') ADD = _binaryop('ADD') SUB = _binaryop('SUB') IMUL = _binaryop('IMUL') diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -93,16 +93,15 @@ def compile_loop(self, inputargs, operations, looptoken, log=True, name='', logger=None): - return self.assembler.assemble_loop(logger, name, inputargs, operations, - looptoken, log=log) + return self.assembler.assemble_loop(inputargs, operations, looptoken, log, + name, logger) def compile_bridge(self, faildescr, inputargs, operations, original_loop_token, log=True, logger=None): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(logger, faildescr, inputargs, - operations, - original_loop_token, log=log) + return self.assembler.assemble_bridge(faildescr, inputargs, operations, + original_loop_token, log, logger) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -470,6 +470,9 @@ # ------------------------------ Arithmetic ------------------------------ + INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) + INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1)) + ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0) OR_ri, OR_rr, OR_rb, 
_,_,OR_rm, OR_rj, _,_ = common_modes(1) AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_ = common_modes(4) diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py --- a/rpython/jit/backend/x86/test/test_regloc.py +++ b/rpython/jit/backend/x86/test/test_regloc.py @@ -373,3 +373,56 @@ '\x59' ) assert cb.getvalue() == expected_instructions + + # ------------------------------------------------------------ + + def test_push_immed64(self): + immed = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.PUSH(imm(immed)) + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # push r11 + '\x41\x53' + ) + assert cb.getvalue() == expected_instructions + + def test_inc_64bit_address_1(self): + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr)) + # this case is a INC_j + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # inc [r11] + '\x49\xFF\x03' + ) + assert cb.getvalue() == expected_instructions + + def test_inc_64bit_address_2(self): + py.test.skip("there is no unary instruction INSN_a so far") + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(ImmedLoc(0), edx, 3, base_addr)) + # this case would be a INC_a + xxx + + def test_inc_64bit_address_3(self): + base_addr = 0x0123456789ABCDEF + cb = LocationCodeBuilder64() + cb.INC(AddressLoc(eax, ImmedLoc(0), 0, base_addr)) + # this case is a INC_m + # + expected_instructions = ( + # mov r11, 0x0123456789ABCDEF + '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' + # lea r11, [rax+r11] + '\x4E\x8D\x1C\x18' + # inc [r11] + '\x49\xFF\x03' + ) + assert cb.getvalue() == expected_instructions diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -427,8 +427,8 @@ debug._log = None # assert ops_offset is looptoken._x86_ops_offset - # 2*(getfield_raw/int_add/setfield_raw) + ops + None - assert len(ops_offset) == 2*3 + len(operations) + 1 + # 2*increment_debug_counter + ops + None + assert len(ops_offset) == 2 + len(operations) + 1 assert (ops_offset[operations[0]] <= ops_offset[operations[1]] <= ops_offset[operations[2]] <= diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -73,7 +73,7 @@ def guess_call_kind(self, op): return 'residual' def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescr=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): diff --git a/rpython/jit/codewriter/test/test_longlong.py b/rpython/jit/codewriter/test/test_longlong.py --- a/rpython/jit/codewriter/test/test_longlong.py +++ b/rpython/jit/codewriter/test/test_longlong.py @@ -17,7 +17,7 @@ class FakeBuiltinCallControl: def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, extradescr=None): assert oopspecindex is not None # in this test return 'calldescr-%d' % oopspecindex def calldescr_canraise(self, calldescr): diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- 
a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -332,6 +332,7 @@ continue if value in (rop.FORCE_TOKEN, rop.CALL_ASSEMBLER, + rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, rop.DEBUG_MERGE_POINT, diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -306,13 +306,13 @@ def _optimize_CALL_DICT_LOOKUP(self, op): descrs = op.getdescr().get_extra_info().extradescrs + assert descrs # translation hint descr1 = descrs[0] - descr2 = descrs[1] - if descr1 in self.cached_dict_reads: + try: d = self.cached_dict_reads[descr1] - else: + except KeyError: d = self.cached_dict_reads[descr1] = args_dict() - self.corresponding_array_descrs[descr2] = descr1 + self.corresponding_array_descrs[descrs[1]] = descr1 args = self.optimizer.make_args_key(op) try: res_v = d[args] diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -494,6 +494,7 @@ # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- + 'INCREMENT_DEBUG_COUNTER/1', 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', From noreply at buildbot.pypy.org Fri Mar 21 11:27:28 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:28 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Added configurable flag to enable/disable usage of a rerased pair. Will compare performance. Message-ID: <20140321102728.A0C031C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r675:d878eb7b148c Date: 2014-03-20 14:55 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d878eb7b148c/ Log: Added configurable flag to enable/disable usage of a rerased pair. Will compare performance. 
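
The strategies.py diff below replaces the per-class erase/unerase definitions with one module-level switch: with use_rerased enabled each strategy receives a real erasing pair from rpython.rlib.rerased, otherwise the pair degenerates to plain identity functions, so the two variants can be benchmarked against each other. A plain-Python sketch of what the switch toggles between follows; the Erased wrapper is purely illustrative and not rerased's actual implementation.

    # Illustrative only: the two shapes the use_rerased flag chooses between.
    def identity_pair():
        # disabled case: storage objects are passed through unchanged
        return (lambda x: x), (lambda x: x)

    def wrapping_pair(tag):
        # enabled case: storage is hidden behind an opaque wrapper and only
        # unerase() gives the typed value back
        class Erased(object):
            def __init__(self, value):
                self.value = value
            def __repr__(self):
                return "<erased %s>" % tag
        def erase(x):
            return Erased(x)
        def unerase(erased):
            assert isinstance(erased, Erased)
            return erased.value
        return erase, unerase

    use_rerased = False
    erase, unerase = wrapping_pair("list") if use_rerased else identity_pair()

    storage = erase([1, 2, 3])
    assert unerase(storage) == [1, 2, 3]
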
diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -1,5 +1,6 @@ + +import sys from spyvm import model, shadow - from rpython.rlib import rerased from rpython.rlib.objectmodel import import_from_mixin @@ -53,13 +54,13 @@ strategy_tag = 'abstract-int' def storage(self, w_obj): - return w_obj.int_storage + return self.unerase(w_obj.int_storage) def set_initial_storage(self, space, w_obj, size): - w_obj.int_storage = self.initial_storage(space, size) + w_obj.int_storage = self.erase(self.initial_storage(space, size)) def set_storage_for_list(self, space, w_obj, collection): - w_obj.int_storage = self.storage_for_list(space, collection) + w_obj.int_storage = self.erase(self.storage_for_list(space, collection)) def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.int_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) + w_obj.int_storage = self.erase(self.copy_storage_from(space, w_source_obj, reuse_storage)) def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") @@ -79,18 +80,20 @@ result.singleton = result() return result -class BasicStorageStrategyMixin(object): - def erase(self, a): return a - def unerase(self, a): return a - # erase, unerase = rerased.new_static_erasing_pair(self.strategy_tag) +use_rerased = False +def setup_rerased_pair(): + locals = sys._getframe(1).f_locals + if use_rerased: + locals["erase"], locals["unerase"] = rerased.new_static_erasing_pair("strategy-%s" % locals["strategy_tag"]) + else: + locals["erase"], locals["unerase"] = lambda self, x: x, lambda self, x: x # this is the typical "initial" storage strategy, for when every slot # in an object is still nil. No storage is allocated. class AllNilStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta - # erase, unerase = rerased.new_static_erasing_pair("allnil-strategy") - import_from_mixin(BasicStorageStrategyMixin) strategy_tag = 'allnil' + setup_rerased_pair() def fetch(self, space, w_obj, n0): return model.w_nil @@ -116,9 +119,8 @@ # fixed-sized and var-sized objects. 
class ListStorageStrategy(AbstractListStorageStrategy): __metaclass__ = SingletonMeta - # erase, unerase = rerased.new_static_erasing_pair("list-strategy") - import_from_mixin(BasicStorageStrategyMixin) strategy_tag = 'list' + setup_rerased_pair() def fetch(self, space, w_obj, n0): return self.storage(w_obj)[n0] @@ -135,9 +137,8 @@ class TaggingSmallIntegerStorageStrategy(AbstractIntStorageStrategy): __metaclass__ = SingletonMeta - # erase, unerase = rerased.new_static_erasing_pair("tagging-smallint-strategry") - import_from_mixin(BasicStorageStrategyMixin) strategy_tag = 'tagging-smallint' + setup_rerased_pair() needs_objspace = True @staticmethod From noreply at buildbot.pypy.org Fri Mar 21 11:27:29 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:29 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Added comment Message-ID: <20140321102729.BCF711C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r676:30ce07241962 Date: 2014-03-20 14:59 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/30ce07241962/ Log: Added comment diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -82,11 +82,12 @@ use_rerased = False def setup_rerased_pair(): - locals = sys._getframe(1).f_locals + # Small piece of metaprogramming stolen from rpython.rlib.objectmodel.import_from_mixin + cls = sys._getframe(1).f_locals if use_rerased: - locals["erase"], locals["unerase"] = rerased.new_static_erasing_pair("strategy-%s" % locals["strategy_tag"]) + cls["erase"], cls["unerase"] = rerased.new_static_erasing_pair("strategy-%s" % cls["strategy_tag"]) else: - locals["erase"], locals["unerase"] = lambda self, x: x, lambda self, x: x + cls["erase"], cls["unerase"] = lambda self, x: x, lambda self, x: x # this is the typical "initial" storage strategy, for when every slot # in an object is still nil. No storage is allocated. From noreply at buildbot.pypy.org Fri Mar 21 11:27:30 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:30 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixed the configurable rerased thing. Message-ID: <20140321102730.D6ACB1C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r677:9aca5f6a189d Date: 2014-03-20 15:33 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9aca5f6a189d/ Log: Fixed the configurable rerased thing. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -654,8 +654,6 @@ class W_PointersObject(W_AbstractPointersObject): _attrs_ = ['_size', 'list_storage', 'int_storage', 'strategy'] - list_storage = None - int_storage = None @jit.unroll_safe def __init__(self, space, w_class, size): @@ -738,8 +736,26 @@ return False self.strategy, w_other.strategy = w_other.strategy, self.strategy self._size, w_other._size = w_other._size, self._size - self.list_storage, w_other.list_storage = w_other.list_storage, self.list_storage - self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage + + # Unfortunately, the following is necessary to work both with RPYTHON and in interpreted mode. + # Rpython cannot handle list_storage = None in combination with a rerased pair. 
+ + if hasattr(self, 'list_storage'): + if hasattr(w_other, 'list_storage'): + self.list_storage, w_other.list_storage = w_other.list_storage, self.list_storage + else: + w_other.list_storage = self.list_storage + elif hasattr(w_other, 'list_storage'): + self.list_storage = w_other.list_storage + + if hasattr(self, 'int_storage'): + if hasattr(w_other, 'int_storage'): + self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage + else: + w_other.int_storage = self.int_storage + elif hasattr(w_other, 'int_storage'): + self.int_storage = w_other.int_storage + return W_AbstractPointersObject.become(self, w_other) @jit.unroll_safe diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -30,13 +30,13 @@ strategy_tag = 'abstract-list' def storage(self, w_obj): - return w_obj.list_storage + return self.unerase(w_obj.list_storage) def set_initial_storage(self, space, w_obj, size): - w_obj.list_storage = self.initial_storage(space, size) + w_obj.list_storage = self.erase(self.initial_storage(space, size)) def set_storage_for_list(self, space, w_obj, collection): - w_obj.list_storage = self.storage_for_list(space, collection) + w_obj.list_storage = self.erase(self.storage_for_list(space, collection)) def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.list_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) + w_obj.list_storage = self.erase(self.copy_storage_from(space, w_source_obj, reuse_storage)) def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") From noreply at buildbot.pypy.org Fri Mar 21 11:27:32 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:32 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fix did not work, had to use we_are_translated(). Message-ID: <20140321102732.062441C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r678:4aa0466bbb5a Date: 2014-03-20 16:14 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/4aa0466bbb5a/ Log: Fix did not work, had to use we_are_translated(). Also removed obsolete pieces of fieldtype functionality. 
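
The model.py diff below keeps the class-level list_storage/int_storage defaults only behind a we_are_translated() guard: the function returns False when the code runs untranslated on top of CPython, so the defaults exist for interpreted runs, which is what the hasattr() workaround from the previous changeset failed to achieve under translation. A minimal sketch of the idiom follows; the Example class is hypothetical, while the import is the same one the diff uses and assumes an RPython checkout on the path.

    # Sketch of the we_are_translated() idiom; Example is a made-up class.
    from rpython.rlib.objectmodel import we_are_translated

    class Example(object):
        # interpreter-only default: set because we_are_translated() is False
        # when running on top of CPython
        if not we_are_translated():
            debug_storage = None

    if __name__ == "__main__":
        print(we_are_translated())   # False when interpreted
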
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -22,7 +22,7 @@ from rpython.rlib.rarithmetic import intmask, r_uint, r_int from rpython.rlib.debug import make_sure_not_resized from rpython.tool.pairtype import extendabletype -from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin +from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin, we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rsdl import RSDL, RSDL_helper @@ -143,10 +143,6 @@ def unwrap_uint(self, space): raise error.UnwrappingError("Got unexpected class in unwrap_uint") - def fieldtype(self): - from spyvm.strategies import obj - return obj - def is_array_object(self): return False @@ -221,10 +217,6 @@ def clone(self, space): return self - def fieldtype(self): - from spyvm.strategies import SInt - return SInt - class W_AbstractObjectWithIdentityHash(W_Object): """Object with explicit hash (ie all except small ints and floats).""" @@ -327,10 +319,6 @@ def invariant(self): return isinstance(self.value, int) - def fieldtype(self): - from spyvm.strategies import LPI - return LPI - def is_array_object(self): return True @@ -422,10 +410,6 @@ def size(self): return 2 - def fieldtype(self): - from spyvm.strategies import flt - return flt - @signature.finishsigs class W_AbstractObjectWithClassReference(W_AbstractObjectWithIdentityHash): """Objects with arbitrary class (ie not CompiledMethod, SmallInteger or @@ -654,6 +638,9 @@ class W_PointersObject(W_AbstractPointersObject): _attrs_ = ['_size', 'list_storage', 'int_storage', 'strategy'] + if not we_are_translated(): + list_storage = None + int_storage = None @jit.unroll_safe def __init__(self, space, w_class, size): @@ -736,26 +723,8 @@ return False self.strategy, w_other.strategy = w_other.strategy, self.strategy self._size, w_other._size = w_other._size, self._size - - # Unfortunately, the following is necessary to work both with RPYTHON and in interpreted mode. - # Rpython cannot handle list_storage = None in combination with a rerased pair. 
- - if hasattr(self, 'list_storage'): - if hasattr(w_other, 'list_storage'): - self.list_storage, w_other.list_storage = w_other.list_storage, self.list_storage - else: - w_other.list_storage = self.list_storage - elif hasattr(w_other, 'list_storage'): - self.list_storage = w_other.list_storage - - if hasattr(self, 'int_storage'): - if hasattr(w_other, 'int_storage'): - self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage - else: - w_other.int_storage = self.int_storage - elif hasattr(w_other, 'int_storage'): - self.int_storage = w_other.int_storage - + self.list_storage, w_other.list_storage = w_other.list_storage, self.list_storage + self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage return W_AbstractPointersObject.become(self, w_other) @jit.unroll_safe @@ -766,13 +735,9 @@ self.log_strategy_operation("Cloned") return w_result - def fieldtype(self): - from spyvm.strategies import obj - return obj - class W_WeakPointersObject(W_AbstractPointersObject): _attrs_ = ['_weakvars'] - + @jit.unroll_safe def __init__(self, space, w_class, size): W_AbstractPointersObject.__init__(self, space, w_class, size) diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -4,8 +4,6 @@ from spyvm.test import test_miniimage as tools from spyvm.error import WrapperException, FatalError -# Fieldtypes have a separate test file - space, interp = tools.setup_module(tools, filename='bootstrapped.image') class_Array = space.classtable["w_Array"] From noreply at buildbot.pypy.org Fri Mar 21 11:27:33 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:33 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Removed rerased functionality completely, since it is only hindering the trace optimizer. Message-ID: <20140321102733.125FE1C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r679:106d0c112fc1 Date: 2014-03-20 16:23 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/106d0c112fc1/ Log: Removed rerased functionality completely, since it is only hindering the trace optimizer. diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -1,7 +1,6 @@ import sys from spyvm import model, shadow -from rpython.rlib import rerased from rpython.rlib.objectmodel import import_from_mixin # Disables all optimized strategies, for debugging. 
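
With the erasing pair gone, the hunks around this point reduce every storage accessor to a direct attribute read and write, which is the stated motivation: the JIT then traces the list operations themselves rather than an extra erase/unerase layer. Condensed, the shape that survives is roughly the following; this is illustrative, not the literal spyvm code.

    # Roughly the accessor shape the list strategy keeps after the removal.
    class ListStorageStrategy(object):
        def storage(self, w_obj):
            return w_obj.list_storage          # direct attribute access

        def fetch(self, space, w_obj, n0):
            return self.storage(w_obj)[n0]

        def store(self, space, w_obj, n0, w_val):
            self.storage(w_obj)[n0] = w_val
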
@@ -30,13 +29,13 @@ strategy_tag = 'abstract-list' def storage(self, w_obj): - return self.unerase(w_obj.list_storage) + return w_obj.list_storage def set_initial_storage(self, space, w_obj, size): - w_obj.list_storage = self.erase(self.initial_storage(space, size)) + w_obj.list_storage = self.initial_storage(space, size) def set_storage_for_list(self, space, w_obj, collection): - w_obj.list_storage = self.erase(self.storage_for_list(space, collection)) + w_obj.list_storage = self.storage_for_list(space, collection) def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.list_storage = self.erase(self.copy_storage_from(space, w_source_obj, reuse_storage)) + w_obj.list_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") @@ -54,13 +53,13 @@ strategy_tag = 'abstract-int' def storage(self, w_obj): - return self.unerase(w_obj.int_storage) + return w_obj.int_storage def set_initial_storage(self, space, w_obj, size): - w_obj.int_storage = self.erase(self.initial_storage(space, size)) + w_obj.int_storage = self.initial_storage(space, size) def set_storage_for_list(self, space, w_obj, collection): - w_obj.int_storage = self.erase(self.storage_for_list(space, collection)) + w_obj.int_storage = self.storage_for_list(space, collection) def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.int_storage = self.erase(self.copy_storage_from(space, w_source_obj, reuse_storage)) + w_obj.int_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") @@ -80,21 +79,11 @@ result.singleton = result() return result -use_rerased = False -def setup_rerased_pair(): - # Small piece of metaprogramming stolen from rpython.rlib.objectmodel.import_from_mixin - cls = sys._getframe(1).f_locals - if use_rerased: - cls["erase"], cls["unerase"] = rerased.new_static_erasing_pair("strategy-%s" % cls["strategy_tag"]) - else: - cls["erase"], cls["unerase"] = lambda self, x: x, lambda self, x: x - # this is the typical "initial" storage strategy, for when every slot # in an object is still nil. No storage is allocated. class AllNilStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta strategy_tag = 'allnil' - setup_rerased_pair() def fetch(self, space, w_obj, n0): return model.w_nil @@ -121,7 +110,6 @@ class ListStorageStrategy(AbstractListStorageStrategy): __metaclass__ = SingletonMeta strategy_tag = 'list' - setup_rerased_pair() def fetch(self, space, w_obj, n0): return self.storage(w_obj)[n0] @@ -139,7 +127,6 @@ class TaggingSmallIntegerStorageStrategy(AbstractIntStorageStrategy): __metaclass__ = SingletonMeta strategy_tag = 'tagging-smallint' - setup_rerased_pair() needs_objspace = True @staticmethod From noreply at buildbot.pypy.org Fri Mar 21 11:27:34 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:34 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Generalized TaggingStrategy to ValueOrNilStrategy, added strategy for floats. Message-ID: <20140321102734.31ED71C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r680:74e7f1232b64 Date: 2014-03-20 17:37 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/74e7f1232b64/ Log: Generalized TaggingStrategy to ValueOrNilStrategy, added strategy for floats. Small renamings and refactorings. 
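
The strategies.py changes in this changeset split storing into a generic template plus per-strategy checks: store() asks the current strategy whether it can_contain() the value and, on a mismatch, switches the object to whatever strategy find_strategy_for_objects() selects before retrying the store. Stripped of the spyvm details, the pattern looks roughly like this; every name below is illustrative.

    # Condensed sketch of store-with-generalization; not the spyvm classes.
    class Strategy(object):
        def can_contain(self, value):
            raise NotImplementedError

        def store(self, obj, n0, value):
            if self.can_contain(value):
                obj.storage[n0] = value
            else:
                # generalize: pick a strategy that accepts the value, then retry
                obj.strategy = find_strategy_for(value)
                obj.strategy.store(obj, n0, value)

    class IntOrNilStrategy(Strategy):
        def can_contain(self, value):
            return value is None or isinstance(value, int)

    class GenericStrategy(Strategy):
        def can_contain(self, value):
            return True

    def find_strategy_for(value):
        if IntOrNilStrategy().can_contain(value):
            return IntOrNilStrategy()
        return GenericStrategy()

    class Obj(object):
        def __init__(self, size):
            self.strategy = IntOrNilStrategy()
            self.storage = [None] * size   # a real object would also convert storage

    o = Obj(3)
    o.strategy.store(o, 0, 41)        # accepted by IntOrNilStrategy
    o.strategy.store(o, 1, "text")    # triggers the switch to GenericStrategy
    assert isinstance(o.strategy, GenericStrategy)
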
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -644,10 +644,10 @@ @jit.unroll_safe def __init__(self, space, w_class, size): - from spyvm.strategies import strategy_of_size + from spyvm.strategies import empty_strategy """Create new object with size = fixed + variable size.""" W_AbstractPointersObject.__init__(self, space, w_class, size) - self.strategy = strategy_of_size(self.s_class, size) + self.strategy = empty_strategy(self.s_class) self.initialize_storage(space, size) self.log_strategy_operation("Initialized") diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -1,7 +1,8 @@ import sys -from spyvm import model, shadow +from spyvm import model, shadow, constants from rpython.rlib.objectmodel import import_from_mixin +from rpython.rlib.rfloat import string_to_float # Disables all optimized strategies, for debugging. only_list_storage = False @@ -20,9 +21,17 @@ def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): raise NotImplementedError("Abstract base class") + def store(self, space, w_obj, n0, w_val): + if self.can_contain(space, w_val): + self.do_store(space, w_obj, n0, w_val) + new_strategy = find_strategy_for_object(w_val) + return w_obj.store_with_new_strategy(space, new_strategy, n0, w_val) + + def can_contain(self, space, w_val): + raise NotImplementedError("Abstract base class") def fetch(self, space, w_obj, n0): raise NotImplementedError("Abstract base class") - def store(self, space, w_obj, n0, w_val): + def do_store(self, space, w_obj, n0, w_val): raise NotImplementedError("Abstract base class") class AbstractListStorageStrategy(AbstractStorageStrategy): @@ -85,17 +94,12 @@ __metaclass__ = SingletonMeta strategy_tag = 'allnil' + def can_contain(self, space, w_obj): + return w_obj == model.w_nil def fetch(self, space, w_obj, n0): return model.w_nil - - def store(self, space, w_obj, n0, w_val): - # This is an important moment, where we decide where to go on the first non-nil store. - if w_val == model.w_nil: - return - if not only_list_storage: - if TaggingSmallIntegerStorageStrategy.can_contain(w_val): - return w_obj.store_with_new_strategy(space, TaggingSmallIntegerStorageStrategy.singleton, n0, w_val) - return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) + def do_store(self, space, w_obj, n0, w_val): + pass def set_initial_storage(self, space, w_obj, size): pass @@ -111,9 +115,11 @@ __metaclass__ = SingletonMeta strategy_tag = 'list' + def can_contain(self, space, w_val): + return True def fetch(self, space, w_obj, n0): return self.storage(w_obj)[n0] - def store(self, space, w_obj, n0, w_val): + def do_store(self, space, w_obj, n0, w_val): # TODO enable generalization by maintaining a counter of elements that are nil. 
self.storage(w_obj)[n0] = w_val def initial_storage(self, space, size): @@ -124,56 +130,90 @@ length = w_obj.basic_size() return [w_obj.strategy.fetch(space, w_obj, i) for i in range(length)] -class TaggingSmallIntegerStorageStrategy(AbstractIntStorageStrategy): - __metaclass__ = SingletonMeta - strategy_tag = 'tagging-smallint' +class AbstractValueOrNilStorageStrategy(AbstractIntStorageStrategy): needs_objspace = True + strategy_tag = 'abstract-valueOrNil' - @staticmethod - def wrap(val): - return val << 1 - @staticmethod - def unwrap(val): - return val >> 1 - @staticmethod - def can_contain(w_val): - return isinstance(w_val, model.W_SmallInteger) - # TODO - use just a single value to represent nil (max_int-1) - # Then, turn wrap/unwrap into noops - # also store W_LargePositiveInteger1Word? - nil_value = 1 + def can_contain(self, space, w_val): + return w_val == model.w_nil or (isinstance(w_val, self.wrapper_class) and self.unwrap(space, w_val) != self.nil_value) def fetch(self, space, w_obj, n0): val = self.storage(w_obj)[n0] if val == self.nil_value: return space.w_nil else: - return space.wrap_int(self.unwrap(val)) + return self.wrap(space, val) - def store(self, space, w_obj, n0, w_val): + def do_store(self, space, w_obj, n0, w_val): store = self.storage(w_obj) - if self.can_contain(w_val): - store[n0] = self.wrap(space.unwrap_int(w_val)) + if w_val == space.w_nil: + store[n0] = self.nil_value else: - if w_val == space.w_nil: - # TODO - generelize to AllNilStorage by maintaining a counter of nil-elements - store[n0] = self.nil_value - else: - # Storing a wrong type - dehomogenize to ListStorage - return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - + store[n0] = self.unwrap(space, w_val) + def initial_storage(self, space, size): return [self.nil_value] * size - + def storage_for_list(self, space, collection): length = len(collection) - store = [self.nil_value] * length + store = self.initial_storage(length) for i in range(length): if collection[i] != space.w_nil: - store[i] = self.wrap(space.unwrap_int(collection[i])) + store[i] = self.unwrap(space, collection[i]) return store -def strategy_of_size(s_containing_class, size): +class SmallIntegerOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): + __metaclass__ = SingletonMeta + strategy_tag = 'float-orNil' + nil_value = constants.MAXINT + wrapper_class = model.W_SmallInteger + def wrap(self, space, val): return space.wrap_int(val) + def unwrap(self, space, w_val): return space.unwrap_int(w_val) + +class FloatOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): + __metaclass__ = SingletonMeta + strategy_tag = 'smallint-orNil' + nil_value = string_to_float("-nan") + wrapper_class = model.W_Float + def wrap(self, space, val): return space.wrap_float(val) + def unwrap(self, space, w_val): return space.unwrap_float(w_val) + +def find_strategy_for_object(space, var): + return find_strategy_for_objects(space, [var]) + +def find_strategy_for_objects(space, vars): + if only_list_storage: + ListStorageStrategy.singleton + + specialized_strategies = 3 + all_nil_can_handle = True + small_int_can_handle = True + float_can_handle = True + for w_obj in vars: + if all_nil_can_handle and not AllNilStorageStrategy.singleton.can_contain(space, w_obj): + all_nil_can_handle = False + specialized_strategies = specialized_strategies - 1 + if small_int_can_handle and not SmallIntegerOrNilStorageStrategy.singleton.can_contain(space, w_obj): + small_int_can_handle = False + specialized_strategies = 
specialized_strategies - 1 + if float_can_handle and not FloatOrNilStorageStrategy.singleton.can_contain(space, w_obj): + float_can_handle = False + specialized_strategies = specialized_strategies - 1 + + if specialized_strategies <= 0: + return ListStorageStrategy.singleton + + if all_nil_can_handle: + return AllNilStorageStrategy.singleton + if small_int_can_handle: + return SmallIntegerOrNilStorageStrategy.singleton + if float_can_handle: + return FloatOrNilStorageStrategy.singleton + + # If this happens, please look for a bug in the code above. + assert False, "No strategy could be found for list %r" % vars + +def empty_strategy(s_containing_class): if s_containing_class is None: # This is a weird and rare special case for w_nil return ListStorageStrategy.singleton @@ -194,19 +234,7 @@ # Ths class object shadows are not yet synchronized. return ListStorageStrategy.singleton - if not is_variable or only_list_storage: + if is_variable: + return find_strategy_for_objects(s_containing_class.space, vars) + else: return ListStorageStrategy.singleton - - is_all_nils = True - for w_obj in vars: - if w_obj != model.w_nil: - is_all_nils = False - if not TaggingSmallIntegerStorageStrategy.can_contain(w_obj): - # TODO -- here we can still optimize if there is only - # one single type in the collection. - return ListStorageStrategy.singleton - if is_all_nils: - return AllNilStorageStrategy.singleton - else: - return TaggingSmallIntegerStorageStrategy.singleton - \ No newline at end of file From noreply at buildbot.pypy.org Fri Mar 21 11:27:35 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:35 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixes Message-ID: <20140321102735.54FBF1C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r681:bb282722e3e4 Date: 2014-03-20 17:41 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/bb282722e3e4/ Log: Fixes diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -23,8 +23,8 @@ def store(self, space, w_obj, n0, w_val): if self.can_contain(space, w_val): - self.do_store(space, w_obj, n0, w_val) - new_strategy = find_strategy_for_object(w_val) + return self.do_store(space, w_obj, n0, w_val) + new_strategy = find_strategy_for_object(space, w_val) return w_obj.store_with_new_strategy(space, new_strategy, n0, w_val) def can_contain(self, space, w_val): @@ -146,7 +146,7 @@ def do_store(self, space, w_obj, n0, w_val): store = self.storage(w_obj) - if w_val == space.w_nil: + if w_val == model.w_nil: store[n0] = self.nil_value else: store[n0] = self.unwrap(space, w_val) @@ -156,9 +156,9 @@ def storage_for_list(self, space, collection): length = len(collection) - store = self.initial_storage(length) + store = self.initial_storage(space, length) for i in range(length): - if collection[i] != space.w_nil: + if collection[i] != model.w_nil: store[i] = self.unwrap(space, collection[i]) return store From noreply at buildbot.pypy.org Fri Mar 21 11:27:36 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:36 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Using a list of floats to store either float or int values. 
Message-ID: <20140321102736.7CF601C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r682:efb3ddfef529 Date: 2014-03-20 19:14 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/efb3ddfef529/ Log: Using a list of floats to store either float or int values. diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -1,6 +1,8 @@ -import sys +import sys, math from spyvm import model, shadow, constants +from rpython.rlib import longlong2float, rarithmetic +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.rfloat import string_to_float @@ -133,13 +135,21 @@ class AbstractValueOrNilStorageStrategy(AbstractIntStorageStrategy): needs_objspace = True strategy_tag = 'abstract-valueOrNil' + # TODO -- use another value... something like max_float? + nil_value = string_to_float("nan") + + def is_nil_value(self, val): + # return val == self.nil_value + return math.isnan(val) def can_contain(self, space, w_val): - return w_val == model.w_nil or (isinstance(w_val, self.wrapper_class) and self.unwrap(space, w_val) != self.nil_value) + return w_val == model.w_nil or \ + (isinstance(w_val, self.wrapper_class) \ + and not self.is_nil_value(self.unwrap(space, w_val))) def fetch(self, space, w_obj, n0): val = self.storage(w_obj)[n0] - if val == self.nil_value: + if self.is_nil_value(val): return space.w_nil else: return self.wrap(space, val) @@ -147,7 +157,7 @@ def do_store(self, space, w_obj, n0, w_val): store = self.storage(w_obj) if w_val == model.w_nil: - store[n0] = self.nil_value + store[n0] = self.nil_value else: store[n0] = self.unwrap(space, w_val) @@ -162,21 +172,32 @@ store[i] = self.unwrap(space, collection[i]) return store +def _int_to_float(int_val): + return longlong2float.longlong2float(rffi.cast(lltype.SignedLongLong, int_val)) + class SmallIntegerOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): __metaclass__ = SingletonMeta - strategy_tag = 'float-orNil' - nil_value = constants.MAXINT + strategy_tag = 'smallint-orNil' wrapper_class = model.W_SmallInteger - def wrap(self, space, val): return space.wrap_int(val) - def unwrap(self, space, w_val): return space.unwrap_int(w_val) + + def wrap(self, space, val): + int_val = rarithmetic.intmask(longlong2float.float2longlong(val)) + return space.wrap_int(int_val) + def unwrap(self, space, w_val): + assert isinstance(w_val, model.W_SmallInteger) + int_val = space.unwrap_int(w_val) + return _int_to_float(int_val) class FloatOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): __metaclass__ = SingletonMeta - strategy_tag = 'smallint-orNil' - nil_value = string_to_float("-nan") + strategy_tag = 'float-orNil' wrapper_class = model.W_Float - def wrap(self, space, val): return space.wrap_float(val) - def unwrap(self, space, w_val): return space.unwrap_float(w_val) + + def wrap(self, space, val): + return space.wrap_float(val) + def unwrap(self, space, w_val): + assert isinstance(w_val, model.W_Float) + return space.unwrap_float(w_val) def find_strategy_for_object(space, var): return find_strategy_for_objects(space, [var]) @@ -211,7 +232,7 @@ return FloatOrNilStorageStrategy.singleton # If this happens, please look for a bug in the code above. - assert False, "No strategy could be found for list %r" % vars + assert False, "No strategy could be found for list..." 
def empty_strategy(s_containing_class): if s_containing_class is None: From noreply at buildbot.pypy.org Fri Mar 21 11:27:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:37 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixed test_strategies.py, added tests for FloatStrategy. Message-ID: <20140321102737.8D73B1C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r683:818054edd715 Date: 2014-03-20 19:36 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/818054edd715/ Log: Fixed test_strategies.py, added tests for FloatStrategy. diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -26,7 +26,7 @@ def store(self, space, w_obj, n0, w_val): if self.can_contain(space, w_val): return self.do_store(space, w_obj, n0, w_val) - new_strategy = find_strategy_for_object(space, w_val) + new_strategy = find_strategy_for_objects(space, [w_val]) return w_obj.store_with_new_strategy(space, new_strategy, n0, w_val) def can_contain(self, space, w_val): @@ -199,9 +199,6 @@ assert isinstance(w_val, model.W_Float) return space.unwrap_float(w_val) -def find_strategy_for_object(space, var): - return find_strategy_for_objects(space, [var]) - def find_strategy_for_objects(space, vars): if only_list_storage: ListStorageStrategy.singleton diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -15,24 +15,29 @@ a.store(space, 0, arr(1)) return a -def tagging_arr(size): +def int_arr(size): a = arr(size) a.store(space, 0, space.wrap_int(12)) return a -def tagging_arr_odd(size): +def float_arr(size): a = arr(size) - a.store(space, 2, space.wrap_int(12)) + a.store(space, 0, space.wrap_float(1.2)) return a def check_arr(arr, expected): for i in range(arr.basic_size()): + w_val = arr.fetch(space, i) if expected[i] == w_nil: - assert arr.fetch(space, i) == w_nil - else: - w_val = arr.fetch(space, i) + assert w_val == w_nil + elif isinstance(expected[i], int): assert isinstance(w_val, model.W_SmallInteger) assert space.unwrap_int(w_val) == expected[i] + elif isinstance(expected[i], float): + assert isinstance(w_val, model.W_Float) + assert space.unwrap_float(w_val) == expected[i] + else: + assert False, "Unexpected array of expected values." 
# ====== AllNil StorageStrategy @@ -76,41 +81,82 @@ a.store(space, 1, arr(1)) assert a.basic_size() == 5 -# ====== Tagging SmallInteger StorageStrategy +# ====== SmallIntegerOrNil StorageStrategy def test_AllNil_to_Int(): - a = tagging_arr(5) - assert isinstance(a.strategy, strategies.TaggingSmallIntegerStorageStrategy) + a = int_arr(5) + assert isinstance(a.strategy, strategies.SmallIntegerOrNilStorageStrategy) check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) -def test_Tagging_store(): - a = tagging_arr(5) +def test_SmallInt_store(): + a = int_arr(5) a.store(space, 1, space.wrap_int(20)) a.store(space, 2, space.wrap_int(20)) - assert isinstance(a.strategy, strategies.TaggingSmallIntegerStorageStrategy) + assert isinstance(a.strategy, strategies.SmallIntegerOrNilStorageStrategy) check_arr(a, [12, 20, 20, w_nil, w_nil]) -def test_Tagging_store_nil_to_nil(): - a = tagging_arr_odd(5) +def test_SmallInt_store_nil_to_nil(): + a = int_arr(5) a.store(space, 1, w_nil) - check_arr(a, [w_nil, w_nil, 12, w_nil, w_nil]) + check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) -def test_Tagging_delete(): - a = tagging_arr_odd(5) +def test_SmallInt_overwrite(): + a = int_arr(5) a.store(space, 1, space.wrap_int(1)) a.store(space, 3, space.wrap_int(2)) - a.store(space, 2, space.wrap_int(100)) + a.store(space, 0, space.wrap_int(100)) a.store(space, 1, space.wrap_int(200)) a.store(space, 3, space.wrap_int(300)) - check_arr(a, [w_nil, 200, 100, 300, w_nil]) + check_arr(a, [100, 200, w_nil, 300, w_nil]) -def test_Tagging_delete_first(): - a = tagging_arr_odd(5) +def test_SmallInt_delete(): + a = int_arr(5) a.store(space, 1, space.wrap_int(1)) a.store(space, 1, w_nil) - check_arr(a, [w_nil, w_nil, 12, w_nil, w_nil]) + check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) -def test_Tagging_to_List(): - a = tagging_arr_odd(5) +def test_SmallInt_to_List(): + a = int_arr(5) a.store(space, 1, arr(1)) assert isinstance(a.strategy, strategies.ListStorageStrategy) + +# ====== FloatOrNil StorageStrategy + +def test_AllNil_to_Float(): + a = float_arr(5) + assert isinstance(a.strategy, strategies.FloatOrNilStorageStrategy) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_store(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(20.0)) + a.store(space, 2, space.wrap_float(20.0)) + assert isinstance(a.strategy, strategies.FloatOrNilStorageStrategy) + check_arr(a, [1.2, 20.0, 20.0, w_nil, w_nil]) + +def test_Float_store_nil_to_nil(): + a = float_arr(5) + a.store(space, 1, w_nil) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_overwrite(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(1.0)) + a.store(space, 3, space.wrap_float(2.0)) + a.store(space, 0, space.wrap_float(100.0)) + a.store(space, 1, space.wrap_float(200.0)) + a.store(space, 3, space.wrap_float(300.0)) + check_arr(a, [100.0, 200.0, w_nil, 300.0, w_nil]) + +def test_Float_delete(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(1)) + a.store(space, 1, w_nil) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_to_List(): + a = float_arr(5) + a.store(space, 1, arr(1)) + assert isinstance(a.strategy, strategies.ListStorageStrategy) + + \ No newline at end of file From noreply at buildbot.pypy.org Fri Mar 21 11:27:38 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:27:38 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Using an artificial float value as sentinel for nil in a collection. 
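A sketch of what this subject line means, in plain Python rather than RPython (struct stands in for float2longlong, and the byte pattern is the one introduced in the changesets that follow): a quiet NaN with a distinctive payload marks empty slots, and because NaN never compares equal to itself, the nil check has to be done on the raw bits.

    import struct

    NIL_BYTES = b"\x10\x00\x00\x00\x00\x00\xf8\x7f"   # 0x7ff8000000000010, a quiet NaN
    NIL_VALUE = struct.unpack("<d", NIL_BYTES)[0]
    NIL_BITS = struct.unpack("<q", NIL_BYTES)[0]

    def is_nil(f):
        # NaN != NaN, so compare bit patterns instead of using ==
        return struct.unpack("<q", struct.pack("<d", f))[0] == NIL_BITS

    storage = [NIL_VALUE] * 3
    storage[0] = 1.2
    assert not is_nil(storage[0])
    assert is_nil(storage[1])        # quiet-NaN payload preserved on IEEE-754 platforms
    assert NIL_VALUE != NIL_VALUE    # which is why a plain == cannot be used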
Message-ID: <20140321102738.A645F1C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r684:91e7b60922ab Date: 2014-03-21 11:27 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/91e7b60922ab/ Log: Using an artificial float value as sentinel for nil in a collection. diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -2,6 +2,7 @@ import sys, math from spyvm import model, shadow, constants from rpython.rlib import longlong2float, rarithmetic +from rpython.rlib.rstruct.runpack import runpack from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.rfloat import string_to_float @@ -136,11 +137,11 @@ needs_objspace = True strategy_tag = 'abstract-valueOrNil' # TODO -- use another value... something like max_float? - nil_value = string_to_float("nan") + nil_value = runpack("\x10\x00\x00\x00\x00\x00\xf8\x7f") + nil_value_longlong = long2floatfloat.float2longlong(nil_value) def is_nil_value(self, val): - # return val == self.nil_value - return math.isnan(val) + return long2floatfloat.float2longlong(val) == self.nil_value_longlong def can_contain(self, space, w_val): return w_val == model.w_nil or \ From noreply at buildbot.pypy.org Fri Mar 21 11:53:06 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:53:06 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fix Message-ID: <20140321105306.E2AF41C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r685:0b31925287c6 Date: 2014-03-21 11:28 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/0b31925287c6/ Log: Fix diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -137,7 +137,7 @@ needs_objspace = True strategy_tag = 'abstract-valueOrNil' # TODO -- use another value... something like max_float? - nil_value = runpack("\x10\x00\x00\x00\x00\x00\xf8\x7f") + nil_value = runpack("d", "\x10\x00\x00\x00\x00\x00\xf8\x7f") nil_value_longlong = long2floatfloat.float2longlong(nil_value) def is_nil_value(self, val): From noreply at buildbot.pypy.org Fri Mar 21 11:53:08 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:53:08 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixed typo Message-ID: <20140321105308.10B631C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r686:615f424d5684 Date: 2014-03-21 11:30 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/615f424d5684/ Log: Fixed typo diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -138,10 +138,10 @@ strategy_tag = 'abstract-valueOrNil' # TODO -- use another value... something like max_float? 
nil_value = runpack("d", "\x10\x00\x00\x00\x00\x00\xf8\x7f") - nil_value_longlong = long2floatfloat.float2longlong(nil_value) + nil_value_longlong = longlong2float.float2longlong(nil_value) def is_nil_value(self, val): - return long2floatfloat.float2longlong(val) == self.nil_value_longlong + return longlong2float.float2longlong(val) == self.nil_value_longlong def can_contain(self, space, w_val): return w_val == model.w_nil or \ From noreply at buildbot.pypy.org Fri Mar 21 11:53:09 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 11:53:09 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixed a test, marked a failing test as skipped.. Message-ID: <20140321105309.228B51C02EA@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r687:f13ab96da4bb Date: 2014-03-21 11:52 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/f13ab96da4bb/ Log: Fixed a test, marked a failing test as skipped.. diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -353,6 +353,7 @@ assert target.getword(0) == 0xffff0100 assert target.getword(1) == 0x7fff8000 + at py.test.mark.skipif("'This test must be fixed!'") def test_display_bitmap(): # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent # double-free bug diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -150,7 +150,7 @@ def test_Float_delete(): a = float_arr(5) - a.store(space, 1, space.wrap_float(1)) + a.store(space, 1, space.wrap_float(1.0)) a.store(space, 1, w_nil) check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) From noreply at buildbot.pypy.org Fri Mar 21 12:07:30 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 21 Mar 2014 12:07:30 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Fixed bug in strategies implementation and added test. Message-ID: <20140321110730.CA7931C13AD@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r688:98f32dad569e Date: 2014-03-21 12:07 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/98f32dad569e/ Log: Fixed bug in strategies implementation and added test. 
diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -27,9 +27,11 @@ def store(self, space, w_obj, n0, w_val): if self.can_contain(space, w_val): return self.do_store(space, w_obj, n0, w_val) - new_strategy = find_strategy_for_objects(space, [w_val]) + new_strategy = self.generelized_strategy_for(space, w_val) return w_obj.store_with_new_strategy(space, new_strategy, n0, w_val) + def generelized_strategy_for(self, space, w_val): + raise NotImplementedError("Abstract base class") def can_contain(self, space, w_val): raise NotImplementedError("Abstract base class") def fetch(self, space, w_obj, n0): @@ -73,6 +75,8 @@ def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): w_obj.int_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) + def generelized_strategy_for(self, space, w_val): + return ListStorageStrategy.singleton def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") def storage_for_list(self, space, collection): @@ -104,6 +108,8 @@ def do_store(self, space, w_obj, n0, w_val): pass + def generelized_strategy_for(self, space, w_val): + return find_strategy_for_objects(space, [w_val]) def set_initial_storage(self, space, w_obj, size): pass def set_storage_for_list(self, space, w_obj, collection): diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -120,6 +120,12 @@ a.store(space, 1, arr(1)) assert isinstance(a.strategy, strategies.ListStorageStrategy) +def test_SmallInt_store_Float_to_List(): + a = int_arr(5) + a.store(space, 1, space.wrap_float(2.2)) + assert isinstance(a.strategy, strategies.ListStorageStrategy) + check_arr(a, [12, 2.2, w_nil, w_nil, w_nil]) + # ====== FloatOrNil StorageStrategy def test_AllNil_to_Float(): @@ -159,4 +165,9 @@ a.store(space, 1, arr(1)) assert isinstance(a.strategy, strategies.ListStorageStrategy) +def test_Float_store_SmallInt_to_List(): + a = float_arr(5) + a.store(space, 1, space.wrap_int(2)) + assert isinstance(a.strategy, strategies.ListStorageStrategy) + check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) \ No newline at end of file From noreply at buildbot.pypy.org Fri Mar 21 12:12:47 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 21 Mar 2014 12:12:47 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: some missing commit Message-ID: <20140321111247.33C611C029E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r70145:8f7d82dde2ed Date: 2014-03-21 12:35 +0200 http://bitbucket.org/pypy/pypy/changeset/8f7d82dde2ed/ Log: some missing commit diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -7,4 +7,4 @@ * do escape analysis in the resume/optimizer.py -* make_a_counter_per_value got screwed, but a bit no clue what it does +* reimplement make_a_counter_per_value diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -498,9 +498,8 @@ self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): + return # XXXX assert guard_value_op.getopnum() == rop.GUARD_VALUE - # XXX I have no clue what exactly it does, but we killed failargs - # so i is always 0 now box = guard_value_op.getarg(0) i = 0 # used to be i = guard_value_op.getfailargs().index(box) @@ -557,6 +556,7 @@ else: # we have a GUARD_VALUE that fails. 
Make a _counters instance # (only now, when the guard is actually failing at least once), # and use it to record some statistics about the failing values. + assert 0 # XXX this should be dead code until we fix it index = self._counter & self.CNT_BASE_MASK typetag = self._counter & self.CNT_TYPE_MASK counters = self._counters diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -568,10 +568,6 @@ raise ValueError, "invalid optimization" self.seen_results[op.result] = None self._newoperations.append(op) - if (self.delayed_resume_put is not None and - self.delayed_resume_put.getarg(0) is op.result): - self._newoperations.append(self.delayed_resume_put) - self.delayed_resume_put = None def replace_op(self, old_op, new_op): # XXX: Do we want to cache indexes to prevent search? diff --git a/rpython/jit/metainterp/optimizeopt/resume.py b/rpython/jit/metainterp/optimizeopt/resume.py --- a/rpython/jit/metainterp/optimizeopt/resume.py +++ b/rpython/jit/metainterp/optimizeopt/resume.py @@ -1,6 +1,7 @@ from rpython.jit.metainterp.optimizeopt import optimizer from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.history import Const """ All of this directly emit the ops, without calling emit_operation (they also don't have boxes except a resume_put) @@ -8,9 +9,12 @@ class OptResume(optimizer.Optimization): def optimize_RESUME_PUT(self, op): - if op.getarg(0) in self.optimizer.producer: + arg = op.getarg(0) + if (isinstance(arg, Const) or arg in self.optimizer.producer or + arg in self.optimizer.loop.inputargs): self.optimizer.resumebuilder.resume_put(op) else: + xxx self.optimizer.delayed_resume_put = op # otherwise we did not emit the operation just yet diff --git a/rpython/jit/resume/optimizer.py b/rpython/jit/resume/optimizer.py --- a/rpython/jit/resume/optimizer.py +++ b/rpython/jit/resume/optimizer.py @@ -49,6 +49,7 @@ no = op.getarg(2).getint() self.framestack[op.getarg(1).getint()].values[no] = value else: + XXX self.opt.emit_operation(op) def new_virtual_with_vtable(self, box, vtable, vvalue): diff --git a/rpython/jit/resume/test/support.py b/rpython/jit/resume/test/support.py new file mode 100644 --- /dev/null +++ b/rpython/jit/resume/test/support.py @@ -0,0 +1,5 @@ + +class MockStaticData(object): + def __init__(self, jitcodes, descrs): + self.alljitcodes = jitcodes + self.opcode_descrs = descrs From noreply at buildbot.pypy.org Fri Mar 21 12:12:48 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 21 Mar 2014 12:12:48 +0100 (CET) Subject: [pypy-commit] pypy default: fix Message-ID: <20140321111248.885A71C029E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r70146:345c69c621c5 Date: 2014-03-21 13:11 +0200 http://bitbucket.org/pypy/pypy/changeset/345c69c621c5/ Log: fix diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -72,6 +72,9 @@ def _make_log_operations(self): return LogOperations(self.metainterp_sd, self.guard_number) + def repr_of_resop(self, op): + return LogOperations(self.metainterp_sd, self.guard_number).repr_of_resop(op) + class LogOperations(object): """ From noreply at buildbot.pypy.org Fri Mar 21 14:26:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Mar 2014 14:26:19 +0100 (CET) Subject: [pypy-commit] 
pypy default: A passing test Message-ID: <20140321132619.CF43F1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70147:9b2113d7bb18 Date: 2014-03-21 14:25 +0100 http://bitbucket.org/pypy/pypy/changeset/9b2113d7bb18/ Log: A passing test diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -294,6 +294,35 @@ assert res == f(10) self.check_simple_loop(call=3) + def test_dict_eq_can_release_gil(self): + from rpython.rtyper.lltypesystem import lltype, rffi + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") + T = rffi.CArrayPtr(rffi.TIME_T) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + external(lltype.nullptr(T.TO)) + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + x = 44444 + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct[total] = total + x = dct[total] + total -= 1 + return len(dct) + x + + res = self.meta_interp(f, [10], listops=True) + assert res == 2 + 1 + self.check_simple_loop(call_may_force=2, # ll_dict_lookup_trampoline + call=1) # ll_dict_setitem_lookup_done_trampoline + class TestLLtype(DictTests, LLJitMixin): pass From noreply at buildbot.pypy.org Fri Mar 21 14:28:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Mar 2014 14:28:52 +0100 (CET) Subject: [pypy-commit] pypy default: Improve the test, showing that the calls to eq() are not optimized Message-ID: <20140321132852.AC3101C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70148:f6f44dc30721 Date: 2014-03-21 14:28 +0100 http://bitbucket.org/pypy/pypy/changeset/f6f44dc30721/ Log: Improve the test, showing that the calls to eq() are not optimized diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -311,16 +311,20 @@ dct = objectmodel.r_dict(eq, key) total = n x = 44444 + y = 55555 + z = 66666 while total: myjitdriver.jit_merge_point(total=total, dct=dct) dct[total] = total x = dct[total] + y = dct[total] + z = dct[total] total -= 1 - return len(dct) + x + return len(dct) + x + y + z res = self.meta_interp(f, [10], listops=True) - assert res == 2 + 1 - self.check_simple_loop(call_may_force=2, # ll_dict_lookup_trampoline + assert res == 2 + 1 + 1 + 1 + self.check_simple_loop(call_may_force=4, # ll_dict_lookup_trampoline call=1) # ll_dict_setitem_lookup_done_trampoline From noreply at buildbot.pypy.org Sat Mar 22 00:51:50 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 22 Mar 2014 00:51:50 +0100 (CET) Subject: [pypy-commit] pypy py3k: MiniBuffer's API actually differs from MemoryView, fix Message-ID: <20140321235150.A5EA31C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70149:4f39e0698984 Date: 2014-03-21 16:32 -0700 http://bitbucket.org/pypy/pypy/changeset/4f39e0698984/ Log: MiniBuffer's API actually differs from MemoryView, fix diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,9 +1,10 @@ +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from 
pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryview import W_MemoryView +from pypy.objspace.std.memoryview import _buffer_setitem from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi @@ -41,11 +42,30 @@ # Override the typedef to narrow down the interface that's exposed to app-level -class MiniBuffer(W_MemoryView): +class MiniBuffer(W_Root): def __init__(self, buffer, keepalive=None): - W_MemoryView.__init__(self, buffer) + self.buffer = buffer self.keepalive = keepalive + def buffer_w(self, space): + return self.buffer + + def descr_len(self, space): + return space.wrap(self.buffer.getlength()) + + def descr_getitem(self, space, w_index): + start, stop, step, size = space.decode_index4(w_index, + self.buffer.getlength()) + if step == 0: + return space.wrapbytes(self.buffer.getitem(start)) + res = self.buffer.getslice(start, stop, step, size) + return space.wrapbytes(res) + + @unwrap_spec(newstring='bufferstr') + def descr_setitem(self, space, w_index, newstring): + _buffer_setitem(space, self.buffer, w_index, newstring) + + MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", From noreply at buildbot.pypy.org Sat Mar 22 00:51:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 22 Mar 2014 00:51:52 +0100 (CET) Subject: [pypy-commit] pypy py3k: restore our buffer related error message Message-ID: <20140321235152.2AE121C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70150:707e60600dbe Date: 2014-03-21 16:48 -0700 http://bitbucket.org/pypy/pypy/changeset/707e60600dbe/ Log: restore our buffer related error message diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -199,7 +199,8 @@ w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_memoryview): return w_result.buffer_w(space) - self._typed_unwrap_error(space, "buffer") + raise oefmt(space.w_TypeError, + "'%T' does not support the buffer interface", self) def bytes_w(self, space): self._typed_unwrap_error(space, "bytes") diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -536,7 +536,8 @@ skip("GAIError - probably no connection: %s" % str(ex.args)) assert s.send(memoryview(b'')) == 0 assert s.sendall(memoryview(b'')) is None - raises(TypeError, s.send, '') + exc = raises(TypeError, s.send, '') + assert str(exc.value) == "'str' does not support the buffer interface" raises(TypeError, s.sendall, '') s.close() s = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM, 0) From noreply at buildbot.pypy.org Sat Mar 22 00:51:53 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 22 Mar 2014 00:51:53 +0100 (CET) Subject: [pypy-commit] pypy py3k: this now lives in baseobjspace after the buffer refactor Message-ID: <20140321235153.80C971C029E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70151:144c447e88ff Date: 2014-03-21 16:48 -0700 http://bitbucket.org/pypy/pypy/changeset/144c447e88ff/ Log: this now lives in baseobjspace after the buffer refactor diff --git a/pypy/objspace/descroperation.py 
b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -473,13 +473,6 @@ else: return space.isinstance(w_inst, w_type) - def buffer(space, w_obj): - w_impl = space.lookup(w_obj, '__buffer__') - if w_impl is None: - raise oefmt(space.w_TypeError, - "'%T' does not support the buffer interface", w_obj) - return space.get_and_call_function(w_impl, w_obj) - # helpers From noreply at buildbot.pypy.org Sat Mar 22 08:07:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 08:07:51 +0100 (CET) Subject: [pypy-commit] pypy default: Untested: implement increment_debug_counter for ARM Message-ID: <20140322070751.E378A1D2848@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70152:53058f410b21 Date: 2014-03-22 08:06 +0100 http://bitbucket.org/pypy/pypy/changeset/53058f410b21/ Log: Untested: implement increment_debug_counter for ARM diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -584,7 +584,10 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): - # XXX implement me + base_loc, value_loc = arglocs + self.mc.LDR_ri(value_loc.value, base_loc.value, 0, cond=fcond) + self.mc.ADD_ri(value_loc.value, value_loc.value, 1, cond=fcond) + self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -850,8 +850,12 @@ prepare_op_getfield_gc_pure = prepare_op_getfield_gc def prepare_op_increment_debug_counter(self, op, fcond): - # XXX implement me - return [] + boxes = op.getarglist() + a0, = boxes + base_loc = self.make_sure_var_in_reg(a0, boxes) + value_loc = self.get_scratch_reg(INT, boxes) + self.free_temp_vars() + return [base_loc, value_loc] def prepare_op_getinteriorfield_gc(self, op, fcond): t = unpack_interiorfielddescr(op.getdescr()) From noreply at buildbot.pypy.org Sat Mar 22 08:10:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 08:10:31 +0100 (CET) Subject: [pypy-commit] pypy default: fix test Message-ID: <20140322071031.1B3091C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70153:87d94ae4f90e Date: 2014-03-22 08:09 +0100 http://bitbucket.org/pypy/pypy/changeset/87d94ae4f90e/ Log: fix test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -319,6 +319,9 @@ def log_loop(*args): pass + class logger_ops: + repr_of_resop = repr + class warmrunnerdesc: class memory_manager: retrace_limit = 5 From noreply at buildbot.pypy.org Sat Mar 22 08:21:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 08:21:09 +0100 (CET) Subject: [pypy-commit] pypy default: Fix test Message-ID: <20140322072109.447561C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70154:aaf1ce9e21d0 Date: 2014-03-22 08:20 +0100 http://bitbucket.org/pypy/pypy/changeset/aaf1ce9e21d0/ Log: Fix test diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- 
a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -70,14 +70,14 @@ p13 = new(descr=...) p15 = new_array(8, descr=) setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) From noreply at buildbot.pypy.org Sat Mar 22 08:30:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 08:30:22 +0100 (CET) Subject: [pypy-commit] pypy default: Fix test. Message-ID: <20140322073022.681831C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70155:b03753b1bf5e Date: 2014-03-22 08:29 +0100 http://bitbucket.org/pypy/pypy/changeset/b03753b1bf5e/ Log: Fix test. A nice side-effect of the improve-consecutive-dict-lookups branch: multiple loads or stores of attributes in an oldstyle class instance are folded. diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -141,15 +141,16 @@ i = 0 b = B(1) while i < 100: - b.x - v = b.x # ID: loadattr + v = b.x # ID: loadattr1 + v = b.x # ID: loadattr2 i += v return i log = self.run(main, [], threshold=80) loop, = log.loops_by_filename(self.filepath) - assert loop.match_by_id('loadattr', + assert loop.match_by_id('loadattr1', ''' + guard_not_invalidated(descr=...) i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) guard_no_exception(descr=...) i21 = int_and(i19, _) @@ -161,6 +162,7 @@ i29 = int_is_true(i28) guard_true(i29, descr=...) 
''') + assert loop.match_by_id('loadattr2', "") # completely folded away def test_python_contains(self): def main(): From noreply at buildbot.pypy.org Sat Mar 22 08:55:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 08:55:32 +0100 (CET) Subject: [pypy-commit] pypy default: oups Message-ID: <20140322075532.C93DD1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70156:0ee9349b856b Date: 2014-03-22 08:54 +0100 http://bitbucket.org/pypy/pypy/changeset/0ee9349b856b/ Log: oups diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -103,8 +103,10 @@ extradescrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), + frozenset_or_none(readonly_descrs_interiorfields), frozenset_or_none(write_descrs_fields), frozenset_or_none(write_descrs_arrays), + frozenset_or_none(write_descrs_interiorfields), extraeffect, oopspecindex, can_invalidate) From noreply at buildbot.pypy.org Sat Mar 22 09:18:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 09:18:15 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix for the issue of dicts occasionally not updating their Message-ID: <20140322081815.EAAA31C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70157:7e2b44e79080 Date: 2014-03-22 09:17 +0100 http://bitbucket.org/pypy/pypy/changeset/7e2b44e79080/ Log: Test and fix for the issue of dicts occasionally not updating their cached status on a "del". The fix is to record the whole array-of- struct in the 'write_descrs_arrays' as soon as we write in one interior field, which is probably a good idea. This removes the last usage of effectinfo.write_descrs_interiorfields, which will no longer be translated right now -- but I'm leaving it in as the way forward. 
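The diff below implements the rule described in the log: whenever a call's effects include a read or write of an interior field of an array of structs (such as a dict's 'entries'), the effect analysis also records a read or write of the array itself, so the JIT's cached dict lookups get invalidated by operations like del. A stripped-down sketch of that augmentation step (the tuple layout and descr strings are simplified stand-ins for the real effectinfo data):

    def augment_effects(effects):
        # touching entries[i].key also counts as touching entries itself
        extra = set()
        for kind, descr in effects:
            if kind in ("interiorfield", "readinteriorfield"):
                extra.add((kind.replace("interiorfield", "array"), descr))
        return effects | extra

    effects = {("interiorfield", "dict_entries"), ("struct", "dict")}
    augmented = augment_effects(effects)
    assert ("array", "dict_entries") in augmented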
diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -224,6 +224,18 @@ descr = cpu.interiorfielddescrof(T, fieldname) descrs_interiorfields.append(descr) + # a read or a write to an interiorfield, inside an array of + # structs, is additionally recorded as a read or write of + # the array itself + extraef = set() + for tup in effects: + if tup[0] == "interiorfield" or tup[0] == "readinteriorfield": + T = deref(tup[1]) + if isinstance(T, lltype.Array) and consider_array(T): + extraef.add((tup[0].replace("interiorfield", "array"), + tup[1])) + effects |= extraef + for tup in effects: if tup[0] == "struct": add_struct(write_descrs_fields, tup) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1854,8 +1854,7 @@ def _handle_dict_lookup_call(self, op, oopspec_name, args): extradescr1 = self.cpu.fielddescrof(op.args[1].concretetype.TO, 'entries') - extradescr2 = self.cpu.interiorfielddescrof( - op.args[1].concretetype.TO.entries.TO, 'key') + extradescr2 = self.cpu.arraydescrof(op.args[1].concretetype.TO.entries.TO) return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, extradescr=[extradescr1, extradescr2]) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -177,7 +177,7 @@ self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} - # cache of corresponding array descrs + # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # self._lazy_setfields_and_arrayitems = [] @@ -346,9 +346,8 @@ self.force_lazy_setfield(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) - for descr in effectinfo.write_descrs_interiorfields: - if descr in self.corresponding_array_descrs: - dictdescr = self.corresponding_array_descrs.pop(descr) + if arraydescr in self.corresponding_array_descrs: + dictdescr = self.corresponding_array_descrs.pop(arraydescr) try: del self.cached_dict_reads[dictdescr] except KeyError: diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -327,6 +327,21 @@ self.check_simple_loop(call_may_force=4, # ll_dict_lookup_trampoline call=1) # ll_dict_setitem_lookup_done_trampoline + def test_bug42(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + def f(n): + mdict = {0: None, 1: None, 2: None, 3: None, 4: None, + 5: None, 6: None, 7: None, 8: None, 9: None} + while n > 0: + myjitdriver.jit_merge_point() + n -= 1 + if n in mdict: + del mdict[n] + if n in mdict: + raise Exception + self.meta_interp(f, [10]) + self.check_simple_loop(call_may_force=0, call=3) + class TestLLtype(DictTests, LLJitMixin): pass From noreply at buildbot.pypy.org Sat Mar 22 11:20:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 11:20:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140322102053.A03B41C073C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70158:005b73ce47d4 Date: 
2014-03-22 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/005b73ce47d4/ Log: in-progress diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -107,7 +107,8 @@ def rewrite_assembler(self, cpu, operations, gcrefs_output_list): if not self.stm: - from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler + from rpython.jit.backend.llsupport import rewrite + GcRewriterAssembler = rewrite.GcRewriterAssembler else: from rpython.jit.backend.llsupport import stmrewrite GcRewriterAssembler = stmrewrite.GcStmRewriterAssembler diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -52,7 +52,6 @@ # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. # - # SYNC with stmrewrite.py! for op in operations: if op.getopnum() == rop.DEBUG_MERGE_POINT: continue @@ -60,7 +59,7 @@ if op.is_malloc(): self.handle_malloc_operation(op) continue - elif op.is_call(): + elif op.can_malloc(): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() @@ -80,10 +79,13 @@ if op.getopnum() == rop.CALL_ASSEMBLER: self.handle_call_assembler(op) continue - # - self.newops.append(op) + # ---------- fallback case (overwritten in stmrewrite) ----------- + self.other_operation(op) return self.newops + def other_operation(self, op): + self.newops.append(op) + # ---------- def handle_malloc_operation(self, op): @@ -158,8 +160,7 @@ else: raise NotImplementedError(op.getopname()) - def gen_malloc_frame(self, frame_info, frame): - size_box = history.BoxInt() + def gen_malloc_frame(self, frame_info, frame, size_box): descrs = self.gc_ll_descr.getframedescrs(self.cpu) if self.gc_ll_descr.kind == 'boehm' or self.gc_ll_descr.stm: op0 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], @@ -176,8 +177,8 @@ size_box, descr=descrs.jfi_frame_size) self.newops.append(op0) - self.gen_malloc_nursery_varsize_frame(size_box, frame, - descrs.arraydescr.tid) + self.gen_malloc_nursery_varsize_frame(size_box, frame) + self.gen_initialize_tid(frame, descrs.arraydescr.tid) length_box = history.BoxInt() op1 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], length_box, @@ -192,8 +193,9 @@ assert isinstance(loop_token, history.JitCellToken) jfi = loop_token.compiled_loop_token.frame_info llfi = heaptracker.adr2int(llmemory.cast_ptr_to_adr(jfi)) + size_box = history.BoxInt() frame = history.BoxPtr() - self.gen_malloc_frame(llfi, frame) + self.gen_malloc_frame(llfi, frame, size_box) op2 = ResOperation(rop.SETFIELD_GC, [frame, history.ConstInt(llfi)], None, descr=descrs.jf_frame_info) self.newops.append(op2) @@ -322,7 +324,7 @@ self.write_barrier_applied[v_result] = None return True - def gen_malloc_nursery_varsize_frame(self, sizebox, v_result, tid): + def gen_malloc_nursery_varsize_frame(self, sizebox, v_result): """ Generate CALL_MALLOC_NURSERY_VARSIZE_FRAME """ self.emitting_an_operation_that_can_collect() @@ -333,8 +335,6 @@ self.newops.append(op) self.write_barrier_applied[v_result] = None - self.gen_initialize_tid(v_result, tid) - def gen_malloc_nursery(self, size, v_result): """Try to generate or update a CALL_MALLOC_NURSERY. If that fails, generate a plain CALL_MALLOC_GC instead. 
@@ -392,8 +392,8 @@ v = op.getarg(1) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - op = self.prepare_write_barrier(op, rop.SETFIELD_RAW) - self.gen_write_barrier(op.getarg(0)) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): @@ -402,8 +402,8 @@ v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - op = self.prepare_write_barrier(op, rop.SETINTERIORFIELD_RAW) - self.gen_write_barrier(op.getarg(0)) + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): @@ -412,22 +412,10 @@ v = op.getarg(2) if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL - op = self.prepare_write_barrier(op, rop.SETARRAYITEM_RAW) - self.gen_write_barrier_array(op.getarg(0), - op.getarg(1)) + self.gen_write_barrier_array(val, op.getarg(1)) + #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) - def prepare_write_barrier(self, op, newopnum): - write_barrier_descr = self.gc_ll_descr.write_barrier_descr - if (write_barrier_descr.returns_modified_object and - isinstance(op.getarg(0), ConstPtr)): - args = op.getarglist() - v_box = BoxPtr() - self.newops.append(ResOperation(rop.SAME_AS, [args[0]], v_box)) - args[0] = v_box - return op.copy_and_change(opnum=newopnum, args=args) - return op - def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr args = [v_base] @@ -437,11 +425,10 @@ def gen_write_barrier_array(self, v_base, v_index): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - if write_barrier_descr.has_barrier_from_array(self.cpu): + if write_barrier_descr.has_write_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too # big, then produce a regular write_barrier. If it's unknown or # too big, produce instead a write_barrier_from_array. - assert not write_barrier_descr.returns_modified_object LARGE = 130 length = self.known_lengths.get(v_base, LARGE) if length >= LARGE: diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -9,32 +9,6 @@ debug_print) from rpython.jit.codewriter.effectinfo import EffectInfo -### XXX: -### we changed some 'x2I' barriers to 'x2R' since -### obj initialization may happen in 2 different transactions. -### check and fix this assumption - - -# -# STM Support -# ----------- -# -# Any SETFIELD_GC, SETARRAYITEM_GC, SETINTERIORFIELD_GC must be done on a -# W object. The operation that forces an object p1 to be W is -# COND_CALL_STM_B(p1, descr=x2Wdescr), for x in 'AIQRVWZ'. This -# COND_CALL_STM_B is a bit special because if p1 is not W, it *replaces* -# its value with the W copy (by changing the register's value and -# patching the stack location if any). It's still conceptually the same -# object, but the pointer is different. -# -# The case of GETFIELD_GC & friends is similar, excepted that it goes to -# a R or L object (at first, always a R object). -# -# The name "x2y" of write barriers is called the *category* or "cat". 
-# - - - class GcStmRewriterAssembler(GcRewriterAssembler): # This class performs the same rewrites as its base class, @@ -42,143 +16,83 @@ def __init__(self, *args): GcRewriterAssembler.__init__(self, *args) - self.known_category = {} # variable: letter (R, W, ...) self.always_inevitable = False - - def rewrite(self, operations): - debug_start("jit-stmrewrite-ops") - # overridden method from parent class - # - for op in operations: - opnum = op.getopnum() - if not we_are_translated(): - # only possible in tests: - if opnum in (rop.COND_CALL_STM_B, - -124): # FORCE_SPILL + def other_operation(self, op): + opnum = op.getopnum() + if opnum == rop.INCREMENT_DEBUG_COUNTER: + self.newops.append(op) + return + # ---------- transaction breaks ---------- + if opnum == rop.STM_TRANSACTION_BREAK: + # XXX redo! + #self.emitting_an_operation_that_can_collect() + #self.next_op_may_be_in_new_transaction() + #self.newops.append(op) + return + # ---------- pure operations, guards ---------- + if op.is_always_pure() or op.is_guard() or op.is_ovf(): + self.newops.append(op) + return + # ---------- non-pure getfields ---------- + if opnum in (rop.GETFIELD_GC, rop.GETARRAYITEM_GC, + rop.GETINTERIORFIELD_GC): + self.handle_getfields(op) + return + # ---------- calls ---------- + if op.is_call(): + if opnum == rop.CALL_RELEASE_GIL: + # self.fallback_inevitable(op) + # is done by assembler._release_gil_shadowstack() + self.newops.append(op) + elif opnum == rop.CALL_ASSEMBLER: + assert 0 # case handled by the parent class + else: + # only insert become_inevitable if calling a + # non-transactionsafe and non-releasegil function + descr = op.getdescr() + assert not descr or isinstance(descr, CallDescr) + + if not descr or not descr.get_extra_info() \ + or descr.get_extra_info().call_needs_inevitable(): + self.fallback_inevitable(op) + else: self.newops.append(op) - continue - if opnum in (rop.INCREMENT_DEBUG_COUNTER, - rop.DEBUG_MERGE_POINT): - self.newops.append(op) + return + # ---------- copystrcontent ---------- + if opnum in (rop.COPYSTRCONTENT, rop.COPYUNICODECONTENT): + self.handle_copystrcontent(op) + continue + XXX + # ---------- raw getfields and setfields ---------- + if opnum in (rop.GETFIELD_RAW, rop.SETFIELD_RAW): + if self.maybe_handle_raw_accesses(op): continue - # ---------- transaction breaks ---------- - if opnum == rop.STM_TRANSACTION_BREAK: - self.emitting_an_operation_that_can_collect() - self.next_op_may_be_in_new_transaction() - self.newops.append(op) - continue - # ---------- ptr_eq ---------- - if opnum in (rop.PTR_EQ, rop.INSTANCE_PTR_EQ, - rop.PTR_NE, rop.INSTANCE_PTR_NE): - self.newops.append(op) - continue - # ---------- guard_class ---------- - if opnum == rop.GUARD_CLASS: - assert self.cpu.vtable_offset is None - # requires gcremovetypeptr translation option - # uses h_tid which doesn't need a read-barrier - self.newops.append(op) - continue - # ---------- pure operations needing read-barrier ---------- - if opnum in (rop.GETFIELD_GC_PURE, - rop.GETARRAYITEM_GC_PURE, - rop.ARRAYLEN_GC, rop.STRGETITEM, - rop.UNICODEGETITEM, rop.STRLEN, - rop.UNICODELEN): - # e.g. getting inst_intval of a W_IntObject that is - # currently only a stub needs to first resolve to a - # real object - # XXX: 'I' enough? 
- self.handle_category_operations(op, 'R') - continue - # ---------- pure operations, guards ---------- - if op.is_always_pure() or op.is_guard() or op.is_ovf(): - self.newops.append(op) - continue - # ---------- getfields ---------- - if opnum in (rop.GETFIELD_GC, rop.GETARRAYITEM_GC, - rop.GETINTERIORFIELD_GC): - self.handle_getfields(op) - continue - # ---------- setfields ---------- - if opnum in (rop.SETFIELD_GC, rop.SETINTERIORFIELD_GC, - rop.SETARRAYITEM_GC, rop.STRSETITEM, - rop.UNICODESETITEM): - self.handle_setfields(op) - continue - # ---------- mallocs ---------- - if op.is_malloc(): - self.handle_malloc_operation(op) - continue - # ---------- calls ---------- - if op.is_call(): - if opnum == rop.CALL and op.getdescr(): - d = op.getdescr() - assert isinstance(d, CallDescr) - ei = d.get_extra_info() - if ei and ei.oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: - self.newops.append(op) - continue - - self.emitting_an_operation_that_can_collect() - self.next_op_may_be_in_new_transaction() - - if opnum == rop.CALL_RELEASE_GIL: - # self.fallback_inevitable(op) - # is done by assembler._release_gil_shadowstack() - self.newops.append(op) - elif opnum == rop.CALL_ASSEMBLER: - self.handle_call_assembler(op) - else: - # only insert become_inevitable if calling a - # non-transactionsafe and non-releasegil function - descr = op.getdescr() - assert not descr or isinstance(descr, CallDescr) - - if not descr or not descr.get_extra_info() \ - or descr.get_extra_info().call_needs_inevitable(): - self.fallback_inevitable(op) - else: - self.newops.append(op) - continue - # ---------- copystrcontent ---------- - if opnum in (rop.COPYSTRCONTENT, rop.COPYUNICODECONTENT): - self.handle_copystrcontent(op) - continue - # ---------- raw getfields and setfields ---------- - if opnum in (rop.GETFIELD_RAW, rop.SETFIELD_RAW): - if self.maybe_handle_raw_accesses(op): - continue - # ---------- labels ---------- - if opnum == rop.LABEL: - self.emitting_an_operation_that_can_collect() - self.next_op_may_be_in_new_transaction() - - self.newops.append(op) - continue - # ---------- jumps ---------- - if opnum == rop.JUMP: - self.newops.append(op) - continue - # ---------- finish, other ignored ops ---------- - if opnum in (rop.FINISH, rop.FORCE_TOKEN, - rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR, - rop.JIT_DEBUG, rop.KEEPALIVE, - rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, - ): - self.newops.append(op) - continue - # ---------- fall-back ---------- - # Check that none of the ops handled here can_collect - # or cause a transaction break. This is not done by - # the fallback here - self.fallback_inevitable(op) - debug_print("fallback for", op.repr()) - # - - debug_stop("jit-stmrewrite-ops") - return self.newops + # ---------- labels ---------- + if opnum == rop.LABEL: + self.emitting_an_operation_that_can_collect() + self.next_op_may_be_in_new_transaction() + + self.newops.append(op) + continue + # ---------- jumps ---------- + if opnum == rop.JUMP: + self.newops.append(op) + continue + # ---------- finish, other ignored ops ---------- + if opnum in (rop.FINISH, rop.FORCE_TOKEN, + rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR, + rop.JIT_DEBUG, rop.KEEPALIVE, + rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, + ): + self.newops.append(op) + continue + # ---------- fall-back ---------- + # Check that none of the ops handled here can_collect + # or cause a transaction break. 
This is not done by + # the fallback here + self.fallback_inevitable(op) + debug_print("fallback for", op.repr()) def emitting_an_operation_that_can_collect(self): GcRewriterAssembler.emitting_an_operation_that_can_collect(self) From noreply at buildbot.pypy.org Sat Mar 22 12:04:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 12:04:19 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Revert some more changes Message-ID: <20140322110419.AB76E1D253B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70159:f7dc1aca6874 Date: 2014-03-22 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/f7dc1aca6874/ Log: Revert some more changes diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -280,60 +280,16 @@ def get_root_stack_top_addr(self): rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) return rffi.cast(lltype.Signed, rst_addr) - - -class BarrierDescr(AbstractDescr): + +class WriteBarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 - - self.returns_modified_object = False - self.gcheaderbuilder = gc_ll_descr.gcheaderbuilder - self.HDRPTR = gc_ll_descr.HDRPTR - self.b_slowpath = [0, 0, 0, 0, 0] - - def repr_of_descr(self): - raise NotImplementedError - - def __repr(self): - raise NotImplementedError - - def get_b_slowpath(self, num): - return self.b_slowpath[num] - - def set_b_slowpath(self, num, addr): - self.b_slowpath[num] = addr - - def get_barrier_funcptr(self, returns_modified_object): - raise NotImplementedError - - def get_barrier_fn(self, cpu, returns_modified_object): - # must pass in 'self.returns_modified_object', to make sure that - # the callers are fixed for this case - funcptr = self.get_barrier_funcptr(returns_modified_object) - funcaddr = llmemory.cast_ptr_to_adr(funcptr) - return cpu.cast_adr_to_int(funcaddr) - - def get_barrier_from_array_fn(self, cpu): - # returns a function with arguments [array, index, newvalue] - llop1 = self.llop1 - funcptr = llop1.get_write_barrier_from_array_failing_case( - self.FUNCPTR) - funcaddr = llmemory.cast_ptr_to_adr(funcptr) - return cpu.cast_adr_to_int(funcaddr) # this may return 0 - - def has_barrier_from_array(self, cpu): - return self.get_barrier_from_array_fn(cpu) != 0 - - - -class WriteBarrierDescr(BarrierDescr): - def __init__(self, gc_ll_descr): - BarrierDescr.__init__(self, gc_ll_descr) + self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR self.fielddescr_tid = gc_ll_descr.fielddescr_tid - self.FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address], lltype.Void)) - + # GCClass = gc_ll_descr.GCClass + if GCClass is None: # for tests + return self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -351,17 +307,9 @@ else: self.jit_wb_cards_set = 0 - def repr_of_descr(self): - return 'wbdescr' - - def __repr__(self): - return '' % (self.repr_of_descr(),) - def extract_flag_byte(self, flag_word): # if convenient for the backend, we compute the info about # the flag as (byte-offset, single-byte-flag). 
- if flag_word == 0: - return (0, 0) import struct value = struct.pack(lltype.SignedFmt, flag_word) assert value.count('\x00') == len(value) - 1 # only one byte is != 0 @@ -369,143 +317,24 @@ while value[i] == '\x00': i += 1 return (i, struct.unpack('b', value[i])[0]) - def get_barrier_funcptr(self, returns_modified_object): - assert not returns_modified_object - FUNCTYPE = self.FUNCPTR - return self.llop1.get_write_barrier_failing_case(FUNCTYPE) + def get_write_barrier_fn(self, cpu): + llop1 = self.llop1 + funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR) + funcaddr = llmemory.cast_ptr_to_adr(funcptr) + return cpu.cast_adr_to_int(funcaddr) - @specialize.arg(2) - def _do_barrier(self, gcref_struct, returns_modified_object): - assert self.returns_modified_object == returns_modified_object - hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) - hdr_addr -= self.gcheaderbuilder.size_gc_header - hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR) - if self.jit_wb_if_flag == 0 or hdr.tid & self.jit_wb_if_flag: - # get a pointer to the 'remember_young_pointer' function from - # the GC, and call it immediately - funcptr = self.get_barrier_funcptr(returns_modified_object) - funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) + def get_write_barrier_from_array_fn(self, cpu): + # returns a function with arguments [array, index, newvalue] + llop1 = self.llop1 + funcptr = llop1.get_write_barrier_from_array_failing_case( + self.WB_FUNCPTR) + funcaddr = llmemory.cast_ptr_to_adr(funcptr) + return cpu.cast_adr_to_int(funcaddr) # this may return 0 - -class STMBarrierDescr(BarrierDescr): - def __init__(self, gc_ll_descr, stmcat, cfunc_name): - BarrierDescr.__init__(self, gc_ll_descr) - self.stmcat = stmcat - self.returns_modified_object = True - self.B_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( - [llmemory.Address], llmemory.Address)) + def has_write_barrier_from_array(self, cpu): + return self.get_write_barrier_from_array_fn(cpu) != 0 - self.b_failing_case_ptr = rffi.llexternal( - cfunc_name, - self.B_FUNCPTR_MOD.TO.ARGS, - self.B_FUNCPTR_MOD.TO.RESULT, - sandboxsafe=True, - _nowrapper=True) - def repr_of_descr(self): - return self.stmcat - - def __repr__(self): - return '' % (self.repr_of_descr(),) - - def get_barrier_funcptr(self, returns_modified_object): - assert returns_modified_object - return self.b_failing_case_ptr - - @specialize.arg(2) - def _do_barrier(self, gcref_struct, returns_modified_object): - raise NotImplementedError("implement in subclasses!") - assert self.returns_modified_object == returns_modified_object - funcptr = self.get_barrier_funcptr(returns_modified_object) - res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) - return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) - - -class STMReadBarrierDescr(STMBarrierDescr): - def __init__(self, gc_ll_descr, stmcat): - assert stmcat in ['A2R', 'Q2R', 'A2I'] - func = {'A2R': 'stm_DirectReadBarrier', - 'Q2R': 'stm_RepeatReadBarrier', - 'A2I': 'stm_ImmutReadBarrier', - } - - STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - func[stmcat]) - - @specialize.arg(2) - def _do_barrier(self, gcref_struct, returns_modified_object): - assert returns_modified_object - from rpython.memory.gc.stmgc import StmGC - objadr = llmemory.cast_ptr_to_adr(gcref_struct) - objhdr = rffi.cast(StmGC.GCHDRP, gcref_struct) - - if self.stmcat == 'A2R': - # if h_revision == privat_rev of transaction - priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) - if objhdr.h_revision == priv_rev[0]: - return gcref_struct - - # readcache[obj] == obj - 
read_cache = self.llop1.stm_get_adr_of_read_barrier_cache(rffi.SIGNEDP) - objint = llmemory.cast_adr_to_int(objadr) - assert WORD == 8, "check for 32bit compatibility" - index = (objint & StmGC.FX_MASK) / WORD - CP = lltype.Ptr(rffi.CArray(lltype.Signed)) - rcp = rffi.cast(CP, read_cache[0]) - if rcp[index] == objint: - return gcref_struct - elif self.stmcat == 'Q2R': - # is GCFLAG_PUBLIC_TO_PRIVATE or GCFLAG_MOVED set? - if not (objhdr.h_tid & - (StmGC.GCFLAG_PUBLIC_TO_PRIVATE | StmGC.GCFLAG_MOVED)): - # no. - return gcref_struct - else: # A2I - # GCFLAG_STUB set? - if not (objhdr.h_tid & StmGC.GCFLAG_STUB): - return gcref_struct - - funcptr = self.get_barrier_funcptr(returns_modified_object) - res = funcptr(objadr) - return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) - - -class STMWriteBarrierDescr(STMBarrierDescr): - def __init__(self, gc_ll_descr, stmcat): - assert stmcat in ['A2W', 'V2W', 'A2V'] - func = {'A2W':'stm_WriteBarrier', - 'V2W':'stm_RepeatWriteBarrier', - 'A2V':'stm_WriteBarrier', - } - - STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - func[stmcat]) - - - @specialize.arg(2) - def _do_barrier(self, gcref_struct, returns_modified_object): - assert returns_modified_object - from rpython.memory.gc.stmgc import StmGC - objadr = llmemory.cast_ptr_to_adr(gcref_struct) - objhdr = rffi.cast(StmGC.GCHDRP, gcref_struct) - - # for A2W, we check h_revision and WRITE_BARRIER - # for V2W, we only check WRITE_BARRIER - # for A2V, we only check h_revision - - # if it is a repeated WB or h_revision == privat_rev of transaction - priv_rev = self.llop1.stm_get_adr_of_private_rev_num(rffi.SIGNEDP) - if self.stmcat == 'V2W' or objhdr.h_revision == priv_rev[0]: - # also WRITE_BARRIER not set? - if (self.stmcat == 'A2V' - or not (objhdr.h_tid & StmGC.GCFLAG_WRITE_BARRIER)): - return gcref_struct - - funcptr = self.get_barrier_funcptr(returns_modified_object) - res = funcptr(objadr) - return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) - - class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py kind = 'framework' @@ -551,7 +380,6 @@ def _initialize_for_tests(self): self.layoutbuilder = None self.fielddescr_tid = AbstractDescr() - self.fielddescr_rev = AbstractDescr() self.max_size_of_young_obj = 1000 self.GCClass = None self.gcheaderbuilder = None @@ -592,27 +420,17 @@ def _setup_tid(self): if not self.stm: self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') - self.fielddescr_rev = None else: + xxxxxxxx self.fielddescr_tid = get_field_descr(self, self.GCClass.GCHDR, 'h_tid') - self.fielddescr_rev = get_field_descr(self, self.GCClass.GCHDR, - 'h_revision') - frame_tid = self.layoutbuilder.get_type_id(jitframe.JITFRAME) self.translator._jit2gc['frame_tid'] = frame_tid def _setup_write_barrier(self): - if self.stm: - self.A2Rdescr = STMReadBarrierDescr(self, 'A2R') - self.A2Idescr = STMReadBarrierDescr(self, 'A2I') - self.Q2Rdescr = STMReadBarrierDescr(self, 'Q2R') - self.A2Wdescr = STMWriteBarrierDescr(self, 'A2W') - self.A2Vdescr = STMWriteBarrierDescr(self, 'A2V') - self.V2Wdescr = STMWriteBarrierDescr(self, 'V2W') - self.write_barrier_descr = "wbdescr: do not use" - else: - self.write_barrier_descr = WriteBarrierDescr(self) + self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( + [llmemory.Address], lltype.Void)) + self.write_barrier_descr = WriteBarrierDescr(self) def _make_functions(self, really_not_translated): from rpython.memory.gctypelayout import check_typeid @@ -775,15 +593,12 @@ hdr.tid = tid def 
can_use_nursery_malloc(self, size): - return (self.max_size_of_young_obj is not None and - size < self.max_size_of_young_obj) + return size < self.max_size_of_young_obj def has_write_barrier_class(self): return WriteBarrierDescr def get_malloc_slowpath_addr(self): - if self.max_size_of_young_obj is None: # stm - return None return self.get_malloc_fn_addr('malloc_nursery') def get_malloc_slowpath_array_addr(self): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -59,7 +59,7 @@ if op.is_malloc(): self.handle_malloc_operation(op) continue - elif op.can_malloc(): + elif op.is_call(): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -25,10 +25,9 @@ return # ---------- transaction breaks ---------- if opnum == rop.STM_TRANSACTION_BREAK: - # XXX redo! - #self.emitting_an_operation_that_can_collect() - #self.next_op_may_be_in_new_transaction() - #self.newops.append(op) + self.emitting_an_operation_that_can_collect() + self.next_op_may_be_in_new_transaction() + self.newops.append(op) return # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): @@ -41,6 +40,17 @@ return # ---------- calls ---------- if op.is_call(): + if opnum == rop.CALL and op.getdescr(): + d = op.getdescr() + assert isinstance(d, CallDescr) + ei = d.get_extra_info() + if ei and (ei.oopspecindex == + EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION): + self.newops.append(op) + return + # + self.next_op_may_be_in_new_transaction() + # if opnum == rop.CALL_RELEASE_GIL: # self.fallback_inevitable(op) # is done by assembler._release_gil_shadowstack() @@ -59,125 +69,41 @@ else: self.newops.append(op) return + # ---------- setters for pure fields ---------- + if opnum in (rop.STRSETITEM, rop.UNICODESETITEM): + self.handle_setters_for_pure_fields(op) + return # ---------- copystrcontent ---------- if opnum in (rop.COPYSTRCONTENT, rop.COPYUNICODECONTENT): self.handle_copystrcontent(op) - continue - XXX + return # ---------- raw getfields and setfields ---------- if opnum in (rop.GETFIELD_RAW, rop.SETFIELD_RAW): if self.maybe_handle_raw_accesses(op): - continue + return # ---------- labels ---------- if opnum == rop.LABEL: - self.emitting_an_operation_that_can_collect() + # note that the parent class also clears some things on a LABEL self.next_op_may_be_in_new_transaction() - self.newops.append(op) - continue - # ---------- jumps ---------- - if opnum == rop.JUMP: + return + # ---------- jumps, finish, other ignored ops ---------- + if opnum in (rop.JUMP, rop.FINISH, rop.FORCE_TOKEN, + rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR, + rop.JIT_DEBUG, rop.KEEPALIVE, + rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, + ): self.newops.append(op) - continue - # ---------- finish, other ignored ops ---------- - if opnum in (rop.FINISH, rop.FORCE_TOKEN, - rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR, - rop.JIT_DEBUG, rop.KEEPALIVE, - rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, - ): - self.newops.append(op) - continue + return # ---------- fall-back ---------- - # Check that none of the ops handled here can_collect - # or cause a transaction break. 
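
The fall-back branch above is easier to follow with its bookkeeping spelled out. The toy class below models only that bookkeeping (it is not the real rewriter class from stmrewrite.py): an operation the rewriter cannot prove safe forces the transaction to become inevitable once, and the sticky flag is reset whenever the next operation may run in a new transaction.

    class ToyStmRewriter(object):
        def __init__(self):
            self.newops = []
            self.always_inevitable = False

        def next_op_may_be_in_new_transaction(self):
            # called for labels, calls, transaction breaks, ...
            self.always_inevitable = False

        def fallback_inevitable(self, op):
            # emit at most one 'become inevitable' call per stretch of code
            if not self.always_inevitable:
                self.newops.append('call stm_try_inevitable')
                self.always_inevitable = True
            self.newops.append(op)

    r = ToyStmRewriter()
    r.fallback_inevitable('setfield_raw(...)')
    r.fallback_inevitable('getfield_raw(...)')
    assert r.newops.count('call stm_try_inevitable') == 1
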
This is not done by - # the fallback here + # Check that none of the ops handled here can collect. + # This is not done by the fallback here + assert not op.is_call() and not op.is_malloc() self.fallback_inevitable(op) - debug_print("fallback for", op.repr()) - def emitting_an_operation_that_can_collect(self): - GcRewriterAssembler.emitting_an_operation_that_can_collect(self) - self.invalidate_write_categories() - def next_op_may_be_in_new_transaction(self): - self.known_lengths.clear() # XXX: check if really necessary or - # just for labels - self.known_category.clear() self.always_inevitable = False - def invalidate_write_categories(self): - for v, c in self.known_category.items(): - if c == 'W': - self.known_category[v] = 'V' - - def invalidate_read_categories(self, reason): - # XXX: needs aliasing info to be better - # XXX: move to optimizeopt to only invalidate same typed vars? - for v, c in self.known_category.items(): - if c == 'R': - self.known_category[v] = 'Q' - - - def get_barrier_descr(self, from_cat, to_cat): - # compare with translator.stm.funcgen.stm_barrier - # XXX: specialize more with info of IMMUTABLE and NOPTR - if from_cat >= to_cat: - return None - - gc = self.gc_ll_descr - if to_cat == 'W': - if from_cat >= 'V': - return gc.V2Wdescr - return gc.A2Wdescr - elif to_cat == 'V': - return gc.A2Vdescr - elif to_cat == 'R': - if from_cat >= 'Q': - return gc.Q2Rdescr - return gc.A2Rdescr - elif to_cat == 'I': - return gc.A2Idescr - - def gen_initialize_tid(self, v_newgcobj, tid): - GcRewriterAssembler.gen_initialize_tid(self, v_newgcobj, tid) - if self.gc_ll_descr.fielddescr_rev is not None: - op = ResOperation(rop.STM_SET_REVISION_GC, [v_newgcobj,], None, - descr=self.gc_ll_descr.fielddescr_rev) - self.newops.append(op) - - def gen_write_barrier(self, v): - raise NotImplementedError - - def gen_barrier(self, v_base, target_category): - v_base = self.unconstifyptr(v_base) - assert isinstance(v_base, BoxPtr) - source_category = self.known_category.get(v_base, 'A') - write_barrier_descr = self.get_barrier_descr(source_category, - target_category) - if write_barrier_descr is None: - return v_base # no barrier needed - - if target_category in ('W', 'V'): - # if *any* of the readable vars is the same object, - # it must repeat the read_barrier now - self.invalidate_read_categories(v_base) - - args = [v_base,] - op = rop.COND_CALL_STM_B - self.newops.append(ResOperation(op, args, None, - descr=write_barrier_descr)) - - self.known_category[v_base] = target_category - return v_base - - def unconstifyptr(self, v): - if isinstance(v, ConstPtr): - v_in = v - v_out = BoxPtr() - self.newops.append(ResOperation(rop.SAME_AS, [v_in], v_out)) - v = v_out - assert isinstance(v, BoxPtr) - return v - def handle_getfields(self, op): opnum = op.getopnum() descr = op.getdescr() @@ -254,6 +180,7 @@ self._do_stm_call('stm_try_inevitable', [], None) self.always_inevitable = True self.newops.append(op) + debug_print("fallback for", op.repr()) def _is_null(self, box): return isinstance(box, ConstPtr) and not box.value diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -71,7 +71,6 @@ register_known_gctype(self.cpu, o_vtable, O) # tiddescr = self.gc_ll_descr.fielddescr_tid - revdescr = self.gc_ll_descr.fielddescr_rev wbdescr = self.gc_ll_descr.write_barrier_descr WORD = globals()['WORD'] # @@ -103,11 +102,9 @@ 
namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, '%s_descr' % funcname) # - ops = parse(frm_operations, namespace=namespace, - invent_fail_descr=False) + ops = parse(frm_operations, namespace=namespace) expected = parse(to_operations % Evaluator(namespace), - namespace=namespace, - invent_fail_descr=False) + namespace=namespace) operations = self.gc_ll_descr.rewrite_assembler(self.cpu, ops.operations, []) @@ -287,7 +284,7 @@ gcdescr = get_description(config_) self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, really_not_translated=True) - self.gc_ll_descr.write_barrier_descr.has_barrier_from_array = ( + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( lambda cpu: True) # class FakeCPU(BaseFakeCPU): @@ -579,7 +576,7 @@ """) def test_write_barrier_before_array_without_from_array(self): - self.gc_ll_descr.write_barrier_descr.has_barrier_from_array = ( + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( lambda cpu: False) self.check_rewrite(""" [p1, i2, p3] From noreply at buildbot.pypy.org Sat Mar 22 12:04:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 12:04:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: read barriers Message-ID: <20140322110421.0CD0D1D253B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70160:8c9539ab0df6 Date: 2014-03-22 12:03 +0100 http://bitbucket.org/pypy/pypy/changeset/8c9539ab0df6/ Log: read barriers diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -17,6 +17,7 @@ def __init__(self, *args): GcRewriterAssembler.__init__(self, *args) self.always_inevitable = False + self.read_barrier_applied = {} def other_operation(self, op): opnum = op.getopnum() @@ -103,28 +104,23 @@ def next_op_may_be_in_new_transaction(self): self.always_inevitable = False + self.read_barrier_applied.clear() def handle_getfields(self, op): - opnum = op.getopnum() - descr = op.getdescr() - target_category = 'R' - # XXX: review: - # if opnum == rop.GETFIELD_GC: - # assert isinstance(descr, FieldDescr) - # if descr.is_immutable(): - # target_category = 'I' - # elif opnum == rop.GETINTERIORFIELD_GC: - # assert isinstance(descr, InteriorFieldDescr) - # if descr.is_immutable(): - # target_category = 'I' - # elif opnum == rop.GETARRAYITEM_GC: - # assert isinstance(descr, ArrayDescr) - # if descr.is_immutable(): - # target_category = 'I' - - self.handle_category_operations(op, target_category) + # XXX missing optimitations: the placement of stm_read should + # ideally be delayed for a bit longer after the getfields; if we + # group together several stm_reads then we can save one + # instruction; if delayed over a cond_call_gc_wb then we can + # omit the stm_read completely; ... 
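
To make the comment above concrete, here is a small stand-alone model of the placement rule (plain Python, operations reduced to (name, pointer) pairs, so an illustration rather than the real rewriter): one stm_read per pointer per stretch of code, with that knowledge dropped at points that may land in a new transaction. The real code additionally skips the stm_read when a write barrier already covers the pointer; that part is omitted here.

    def insert_read_barriers(ops):
        newops = []
        read_barrier_applied = {}
        for op, ptr in ops:
            newops.append((op, ptr))
            if op == 'getfield_gc':
                if ptr not in read_barrier_applied:
                    newops.append(('stm_read', ptr))
                    read_barrier_applied[ptr] = None
            elif op in ('label', 'call'):
                # may start a new transaction: forget what was read
                read_barrier_applied.clear()
        return newops

    out = insert_read_barriers([('getfield_gc', 'p1'),
                                ('getfield_gc', 'p1'),
                                ('getfield_gc', 'p2')])
    assert out.count(('stm_read', 'p1')) == 1
    assert out.count(('stm_read', 'p2')) == 1
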
+ self.newops.append(op) + v_ptr = op.getarg(0) + if (v_ptr not in self.read_barrier_applied and + v_ptr not in self.write_barrier_applied): + op1 = ResOperation(rop.STM_READ, [v_ptr], None) + self.newops.append(op1) + self.read_barrier_applied[v_ptr] = None - + def handle_setfields(self, op): opnum = op.getopnum() descr = op.getdescr() @@ -174,7 +170,6 @@ self.newops.append(op1) def fallback_inevitable(self, op): - self.known_category.clear() if not self.always_inevitable: self.emitting_an_operation_that_can_collect() self._do_stm_call('stm_try_inevitable', [], None) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -20,6 +20,10 @@ words.append('CALL_MALLOC_GC') words.append('COND_CALL_GC_WB') words.append('COND_CALL_GC_WB_ARRAY') + # these are pure, and can be done without any read barrier + words.append('ARRAYLEN_GC') + words.append('GETFIELD_GC_PURE') + words.append('GETARRAYITEM_GC_PURE') # words = set(words) missing = [] @@ -85,7 +89,7 @@ call(123, descr=cd) jump() """ % ("$INEV" if inev else "",), cd=calldescr) - + def test_rewrite_one_setfield_gc(self): self.check_rewrite(""" [p1, p2] @@ -93,9 +97,8 @@ jump() """, """ [p1, p2] - cond_call_stm_b(p1, descr=A2Wdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) - jump() """) @@ -108,13 +111,42 @@ jump() """, """ [p1, p2] - p3 = same_as(ConstPtr(t)) - cond_call_stm_b(p3, descr=A2Wdescr) - setfield_gc(p3, p2, descr=tzdescr) - + cond_call_gc_wb(ConstPtr(t), descr=wbdescr) + setfield_gc(ConstPtr(t), p2, descr=tzdescr) jump() """, t=NULL) + def test_rewrite_one_getfield_gc(self): + self.check_rewrite(""" + [p1] + p2 = getfield_gc(p1, descr=tzdescr) + jump() + """, """ + [p1] + p2 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) + jump() + """) + + def test_rewrite_several_getfield_gc(self): + self.check_rewrite(""" + [p1, p2] + p3 = getfield_gc(p1, descr=tzdescr) + p4 = getfield_gc(p1, descr=tzdescr) + p5 = getfield_gc(p2, descr=tzdescr) + p6 = getfield_gc(p1, descr=tzdescr) + jump() + """, """ + [p1, p2] + p3 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) + p4 = getfield_gc(p1, descr=tzdescr) + p5 = getfield_gc(p2, descr=tzdescr) + stm_read(p2) + p6 = getfield_gc(p1, descr=tzdescr) + jump() + """) + def test_invalidate_read_status_after_write_to_constptr(self): TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -346,6 +346,7 @@ rop.CALL_MALLOC_NURSERY_VARSIZE, rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.LABEL, + rop.STM_READ, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -511,6 +511,7 @@ 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] 'KEEPALIVE/1', 'STM_TRANSACTION_BREAK/1', + 'STM_READ/1', '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', From noreply at buildbot.pypy.org Sat Mar 22 12:12:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 12:12:39 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixes Message-ID: 
<20140322111239.0E0621D253B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70161:c3d7dd930444 Date: 2014-03-22 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/c3d7dd930444/ Log: Fixes diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -386,34 +386,32 @@ # ---------- + def must_apply_write_barrier(self, val, v): + if val not in self.write_barrier_applied: + if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and + bool(v.value)): # store a non-NULL + return True + return False + def handle_write_barrier_setfield(self, op): val = op.getarg(0) - if val not in self.write_barrier_applied: - v = op.getarg(1) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self.gen_write_barrier(val) - #op = op.copy_and_change(rop.SETFIELD_RAW) + if self.must_apply_write_barrier(val, op.getarg(1)): + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) - if val not in self.write_barrier_applied: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self.gen_write_barrier(val) - #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) + if self.must_apply_write_barrier(val, op.getarg(2)): + self.gen_write_barrier(val) + #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) - if val not in self.write_barrier_applied: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self.gen_write_barrier_array(val, op.getarg(1)) - #op = op.copy_and_change(rop.SETARRAYITEM_RAW) + if self.must_apply_write_barrier(val, op.getarg(2)): + self.gen_write_barrier_array(val, op.getarg(1)) + #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) def gen_write_barrier(self, v_base): diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -121,6 +121,10 @@ self.read_barrier_applied[v_ptr] = None + def must_apply_write_barrier(self, val, v): + return val not in self.write_barrier_applied + + def handle_setfields(self, op): opnum = op.getopnum() descr = op.getdescr() diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -102,6 +102,18 @@ jump() """) + def test_rewrite_one_setfield_gc_i(self): + self.check_rewrite(""" + [p1, i2] + setfield_gc(p1, i2, descr=tzdescr) + jump() + """, """ + [p1, i2] + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, i2, descr=tzdescr) + jump() + """) + def test_rewrite_setfield_gc_const(self): TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) @@ -147,6 +159,20 @@ jump() """) + def test_rewrite_getfield_after_setfield(self): + self.check_rewrite(""" + [p1, i2] + setfield_gc(p1, i2, descr=tydescr) + p3 = getfield_gc(p1, descr=tzdescr) + jump(p3) + """, """ + [p1, i2] + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, i2, descr=tydescr) + p3 = getfield_gc(p1, 
descr=tzdescr) + jump(p3) + """) + def test_invalidate_read_status_after_write_to_constptr(self): TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) @@ -162,9 +188,9 @@ [p0] p1 = same_as(ConstPtr(t)) p2 = same_as(ConstPtr(t)) - cond_call_stm_b(p1, descr=A2Rdescr) p3 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_b(p2, descr=A2Wdescr) + stm_read(p1) + cond_call_gc_wb(p2, descr=wbdescr) setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=Q2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) From noreply at buildbot.pypy.org Sat Mar 22 12:17:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 12:17:55 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: progress Message-ID: <20140322111755.CFE8C1D253B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70162:1226e7465baf Date: 2014-03-22 12:17 +0100 http://bitbucket.org/pypy/pypy/changeset/1226e7465baf/ Log: progress diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -125,38 +125,8 @@ return val not in self.write_barrier_applied - def handle_setfields(self, op): - opnum = op.getopnum() - descr = op.getdescr() - target_category = 'W' - if opnum == rop.SETFIELD_GC: - assert isinstance(descr, FieldDescr) - if not descr.is_pointer_field(): - target_category = 'V' - elif opnum == rop.SETINTERIORFIELD_GC: - assert isinstance(descr, InteriorFieldDescr) - if not descr.is_pointer_field(): - target_category = 'V' - elif opnum == rop.SETARRAYITEM_GC: - assert isinstance(descr, ArrayDescr) - if not descr.is_array_of_pointers(): - target_category = 'V' - elif opnum in (rop.STRSETITEM, rop.UNICODESETITEM): - target_category = 'V' - - self.handle_category_operations(op, target_category) - - - def handle_category_operations(self, op, target_category): - lst = op.getarglist() - lst[0] = self.gen_barrier(lst[0], target_category) - self.newops.append(op.copy_and_change(op.getopnum(), args=lst)) - - def handle_malloc_operation(self, op): - GcRewriterAssembler.handle_malloc_operation(self, op) - self.known_category[op.result] = 'W' - def handle_copystrcontent(self, op): + xxxxxxxx # first, a write barrier on the target string lst = op.getarglist() lst[1] = self.gen_barrier(lst[1], 'W') @@ -181,10 +151,8 @@ self.newops.append(op) debug_print("fallback for", op.repr()) - def _is_null(self, box): - return isinstance(box, ConstPtr) and not box.value - def maybe_handle_raw_accesses(self, op): + xxxxx from rpython.jit.backend.llsupport.descr import FieldDescr descr = op.getdescr() assert isinstance(descr, FieldDescr) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -24,6 +24,10 @@ words.append('ARRAYLEN_GC') words.append('GETFIELD_GC_PURE') words.append('GETARRAYITEM_GC_PURE') + # these are handled by rewrite.py + words.append('SETFIELD_GC') + words.append('SETARRAYITEM_GC') + words.append('SETINTERIORFIELD_GC') # words = set(words) missing = [] @@ -173,108 +177,25 @@ jump(p3) """) - def test_invalidate_read_status_after_write_to_constptr(self): + def test_mixed_case(self): TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) self.check_rewrite(""" - [p0] - p1 = 
same_as(ConstPtr(t)) - p2 = same_as(ConstPtr(t)) + [p0, p1, p2] p3 = getfield_gc(p1, descr=tzdescr) setfield_gc(p2, p0, descr=tzdescr) p4 = getfield_gc(p1, descr=tzdescr) jump() """, """ - [p0] - p1 = same_as(ConstPtr(t)) - p2 = same_as(ConstPtr(t)) + [p0, p1, p2] p3 = getfield_gc(p1, descr=tzdescr) stm_read(p1) cond_call_gc_wb(p2, descr=wbdescr) setfield_gc(p2, p0, descr=tzdescr) - cond_call_stm_b(p1, descr=Q2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) - jump() """, t=NULL) - def test_invalidate_read_status_after_write(self): - self.check_rewrite(""" - [p0] - p1 = same_as(p0) - p2 = same_as(p0) - p4 = getfield_gc(p1, descr=tzdescr) - setfield_gc(p2, p0, descr=tzdescr) - p5 = getfield_gc(p1, descr=tzdescr) - jump() - """, """ - [p0] - p1 = same_as(p0) - p2 = same_as(p0) - cond_call_stm_b(p1, descr=A2Rdescr) - p4 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_b(p2, descr=A2Wdescr) - setfield_gc(p2, p0, descr=tzdescr) - cond_call_stm_b(p1, descr=Q2Rdescr) - p5 = getfield_gc(p1, descr=tzdescr) - - jump() - """) - - def test_invalidate_read_status_after_write_to_field(self): - self.check_rewrite(""" - [p0] - p1 = getfield_gc(p0, descr=tzdescr) - p2 = getfield_gc(p0, descr=tzdescr) - p3 = getfield_gc(p1, descr=tzdescr) - setfield_gc(p2, p0, descr=tzdescr) - p4 = getfield_gc(p1, descr=tzdescr) - jump() - """, """ - [p0] - cond_call_stm_b(p0, descr=A2Rdescr) - p1 = getfield_gc(p0, descr=tzdescr) - p2 = getfield_gc(p0, descr=tzdescr) - cond_call_stm_b(p1, descr=A2Rdescr) - p3 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_b(p2, descr=A2Wdescr) - setfield_gc(p2, p0, descr=tzdescr) - cond_call_stm_b(p1, descr=Q2Rdescr) - p4 = getfield_gc(p1, descr=tzdescr) - - jump() - """) - - def test_invalidate_read_status_after_write_array_interior(self): - ops = [('getarrayitem_gc', 'adescr'), - ('getinteriorfield_gc', 'intzdescr')] - original = """ - [p0, i1, i2] - p1 = %s(p0, i1, descr=%s) - p2 = %s(p0, i2, descr=%s) - p3 = getfield_gc(p1, descr=tzdescr) - setfield_gc(p2, p0, descr=tzdescr) - p4 = getfield_gc(p1, descr=tzdescr) - jump() - """ - rewritten = """ - [p0, i1, i2] - cond_call_stm_b(p0, descr=A2Rdescr) - p1 = %s(p0, i1, descr=%s) - p2 = %s(p0, i2, descr=%s) - cond_call_stm_b(p1, descr=A2Rdescr) - p3 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_b(p2, descr=A2Wdescr) - setfield_gc(p2, p0, descr=tzdescr) - cond_call_stm_b(p1, descr=Q2Rdescr) - p4 = getfield_gc(p1, descr=tzdescr) - - jump() - """ - for op, descr in ops: - self.check_rewrite(original % (op, descr, op, descr), - rewritten % (op, descr, op, descr)) - def test_rewrite_write_barrier_after_malloc(self): self.check_rewrite(""" [p1, p3] @@ -284,18 +205,16 @@ jump(p2) """, """ [p1, p3] - cond_call_stm_b(p3, descr=A2Wdescr) + cond_call_gc_wb(p3, descr=wbdescr) setfield_gc(p3, p1, descr=tzdescr) p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) - stm_set_revision_gc(p2, descr=revdescr) - cond_call_stm_b(p3, descr=V2Wdescr) + cond_call_gc_wb(p3, descr=wbdescr) setfield_gc(p3, p1, descr=tzdescr) - jump(p2) """) - def test_rewrite_read_barrier_after_malloc(self): + def test_rewrite_no_read_barrier_after_malloc(self): self.check_rewrite(""" [p1] p2 = getfield_gc(p1, descr=tzdescr) @@ -304,16 +223,14 @@ jump(p2) """, """ [p1] - cond_call_stm_b(p1, descr=A2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) p3 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p3, %(tdescr.tid)d, descr=tiddescr) - stm_set_revision_gc(p3, descr=revdescr) p4 = getfield_gc(p1, descr=tzdescr) - jump(p2) """) - + def 
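
The malloc-related tests in this area rely on one piece of bookkeeping that is easy to model separately: an object freshly allocated in the nursery counts as already covered by the write barrier, and that knowledge is thrown away at any operation that can collect. The following is a hypothetical, minimal sketch of that rule only, not the real rewriter.

    def rewrite_block(ops):
        newops = []
        write_barrier_applied = {}
        for op, target in ops:
            if op == 'call_malloc_nursery':
                # a fresh object needs no barrier until the next collection point
                write_barrier_applied[target] = None
            elif op == 'setfield_gc':
                if target not in write_barrier_applied:
                    newops.append(('cond_call_gc_wb', target))
                    write_barrier_applied[target] = None
            elif op in ('call', 'label'):
                write_barrier_applied.clear()
            newops.append((op, target))
        return newops

    out = rewrite_block([('call_malloc_nursery', 'p2'),
                         ('setfield_gc', 'p2'),      # no barrier needed
                         ('label', None),
                         ('setfield_gc', 'p2')])     # barrier needed again
    wbs = [o for o in out if o[0] == 'cond_call_gc_wb']
    assert wbs == [('cond_call_gc_wb', 'p2')]
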
test_rewrite_setfield_gc_on_local(self): self.check_rewrite(""" [p1] From noreply at buildbot.pypy.org Sat Mar 22 12:39:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 12:39:19 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: progress Message-ID: <20140322113919.58E0F1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70163:cf31cfe0189e Date: 2014-03-22 12:38 +0100 http://bitbucket.org/pypy/pypy/changeset/cf31cfe0189e/ Log: progress diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -121,7 +121,7 @@ self.read_barrier_applied[v_ptr] = None - def must_apply_write_barrier(self, val, v): + def must_apply_write_barrier(self, val, v=None): return val not in self.write_barrier_applied @@ -152,7 +152,6 @@ debug_print("fallback for", op.repr()) def maybe_handle_raw_accesses(self, op): - xxxxx from rpython.jit.backend.llsupport.descr import FieldDescr descr = op.getdescr() assert isinstance(descr, FieldDescr) @@ -160,3 +159,9 @@ self.newops.append(op) return True return False + + def handle_setters_for_pure_fields(self, op): + val = op.getarg(0) + if self.must_apply_write_barrier(val): + self.gen_write_barrier(val) + self.newops.append(op) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -102,9 +102,11 @@ namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, '%s_descr' % funcname) # - ops = parse(frm_operations, namespace=namespace) + ops = parse(frm_operations, namespace=namespace, + invent_fail_descr=False) expected = parse(to_operations % Evaluator(namespace), - namespace=namespace) + namespace=namespace, + invent_fail_descr=False) operations = self.gc_ll_descr.rewrite_assembler(self.cpu, ops.operations, []) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -50,6 +50,8 @@ gcdescr = get_description(config_) self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, really_not_translated=True) + self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + lambda cpu: False) # for now # class FakeCPU(BaseFakeCPU): def sizeof(self, STRUCT): @@ -241,12 +243,24 @@ [p1] p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) - stm_set_revision_gc(p2, descr=revdescr) setfield_gc(p2, p1, descr=tzdescr) - jump(p2) """) + def test_rewrite_getfield_gc_on_local(self): + self.check_rewrite(""" + [] + p2 = new(descr=tdescr) + p1 = getfield_gc(p2, descr=tzdescr) + jump(p1) + """, """ + [] + p2 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) + p1 = getfield_gc(p2, descr=tzdescr) + jump(p1) + """) + def test_rewrite_unrelated_setfield_gcs(self): self.check_rewrite(""" [p1, p2, p3, p4] @@ -255,11 +269,10 @@ jump() """, """ [p1, p2, p3, p4] - cond_call_stm_b(p1, descr=A2Wdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) - cond_call_stm_b(p3, descr=A2Wdescr) + cond_call_gc_wb(p3, descr=wbdescr) setfield_gc(p3, p4, descr=tzdescr) - jump() """) @@ -271,10 +284,9 @@ jump() """, """ [p1, p2, i3] - 
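
Many of the rewritten traces in these tests put a cond_call_gc_wb(p, descr=wbdescr) in front of a store. What that operation compiles to is, in spirit, a single test of a flag byte in the object header followed by a conditional call into the GC. The toy below only illustrates the shape of that fast path; JIT_WB_IF_FLAG and the slow-path behaviour are invented for the example and do not match any particular GC's real constants.

    JIT_WB_IF_FLAG = 0x01            # illustrative value only

    class ToyObject(object):
        def __init__(self, tid):
            self.tid = tid
            self.remembered = False

    def write_barrier_slow_path(obj):
        # stands in for the GC's failing-case function: record the object
        # once and clear the flag so later stores stay on the fast path
        obj.remembered = True
        obj.tid &= ~JIT_WB_IF_FLAG

    def cond_call_gc_wb(obj):
        # the emitted fast path: one flag test, call only when needed
        if obj.tid & JIT_WB_IF_FLAG:
            write_barrier_slow_path(obj)

    o = ToyObject(tid=JIT_WB_IF_FLAG)
    cond_call_gc_wb(o)
    cond_call_gc_wb(o)               # second store: flag already cleared
    assert o.remembered and not (o.tid & JIT_WB_IF_FLAG)
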
cond_call_stm_b(p1, descr=A2Wdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) setfield_gc(p1, i3, descr=tydescr) - jump() """) @@ -283,16 +295,15 @@ [p1, p2, i3] setfield_gc(p1, p2, descr=tzdescr) label(p1, i3) - setfield_gc(p1, i3, descr=tydescr) # noptr + setfield_gc(p1, i3, descr=tydescr) jump(p1) """, """ [p1, p2, i3] - cond_call_stm_b(p1, descr=A2Wdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) label(p1, i3) - cond_call_stm_b(p1, descr=A2Vdescr) # noptr + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i3, descr=tydescr) - jump(p1) """) @@ -319,8 +330,9 @@ "jit_debug(i1, i2)", "keepalive(i1)", "i3 = int_sub_ovf(i1, i2)", # is_ovf operations + "increment_debug_counter(i1)", ] - for op in oplist: + for op in oplist: testcase = """ [i1, i2, p1, p2, f1] %s @@ -328,19 +340,6 @@ """ % op self.check_rewrite(testcase, testcase) - def test_rewrite_getfield_gc(self): - self.check_rewrite(""" - [p1] - p2 = getfield_gc(p1, descr=tzdescr) - jump(p2) - """, """ - [p1] - cond_call_stm_b(p1, descr=A2Rdescr) - p2 = getfield_gc(p1, descr=tzdescr) - - jump(p2) - """) - def test_rewrite_getfield_gc_const(self): TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) @@ -350,10 +349,8 @@ jump(p2) """, """ [p1] - p3 = same_as(ConstPtr(t)) - cond_call_stm_b(p3, descr=A2Rdescr) - p2 = getfield_gc(p3, descr=tzdescr) - + p2 = getfield_gc(ConstPtr(t), descr=tzdescr) + stm_read(ConstPtr(t)) jump(p2) """, t=NULL) # XXX could do better: G2Rdescr @@ -365,9 +362,8 @@ jump(i3) """, """ [p1, i2] - cond_call_stm_b(p1, descr=A2Rdescr) i3 = getarrayitem_gc(p1, i2, descr=adescr) - + stm_read(p1) jump(i3) """) @@ -378,27 +374,11 @@ jump(i3) """, """ [p1, i2] - cond_call_stm_b(p1, descr=A2Rdescr) i3 = getinteriorfield_gc(p1, i2, descr=intzdescr) - + stm_read(p1) jump(i3) """) - def test_rewrite_several_getfield_gcs(self): - self.check_rewrite(""" - [p1] - p2 = getfield_gc(p1, descr=tzdescr) - i2 = getfield_gc(p1, descr=tydescr) - jump(p2, i2) - """, """ - [p1] - cond_call_stm_b(p1, descr=A2Rdescr) - p2 = getfield_gc(p1, descr=tzdescr) - i2 = getfield_gc(p1, descr=tydescr) - - jump(p2, i2) - """) - def test_rewrite_unrelated_getfield_gcs(self): self.check_rewrite(""" [p1] @@ -407,11 +387,10 @@ jump(p2, i2) """, """ [p1] - cond_call_stm_b(p1, descr=A2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_b(p2, descr=A2Rdescr) + stm_read(p1) i2 = getfield_gc(p2, descr=tydescr) - + stm_read(p2) jump(p2, i2) """) @@ -426,46 +405,15 @@ jump(p1) """, """ [p1] - cond_call_stm_b(p1, descr=A2Rdescr) i1 = getfield_gc(p1, descr=tydescr) + stm_read(p1) i2 = int_add(i1, 1) - cond_call_stm_b(p1, descr=A2Vdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i2, descr=tydescr) jump(p1) """) - def test_setfield_followed_by_getfield(self): - self.check_rewrite(""" - [p1] - setfield_gc(p1, 123, descr=tydescr) # noptr - p2 = getfield_gc(p1, descr=tzdescr) - jump(p2) - """, """ - [p1] - cond_call_stm_b(p1, descr=A2Vdescr) - setfield_gc(p1, 123, descr=tydescr) - p2 = getfield_gc(p1, descr=tzdescr) - - jump(p2) - """) - - def test_rewrite_getfield_gc_on_local_2(self): - self.check_rewrite(""" - [p0] - p1 = new(descr=tdescr) - p2 = getfield_gc(p1, descr=tzdescr) - jump(p2) - """, """ - [p0] - p1 = call_malloc_nursery(%(tdescr.size)d) - setfield_gc(p1, %(tdescr.tid)d, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) - p2 = getfield_gc(p1, descr=tzdescr) - - jump(p2) - """) - def 
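
String and unicode stores go through the same write-barrier logic, with one twist that the later handle_setters_for_pure_fields(op, targetindex) change makes explicit: strsetitem writes into argument 0, while copystrcontent writes into argument 1, the destination string. A rough stand-alone sketch of that dispatch, using invented tuple-based operations rather than real ResOperations:

    def handle_string_write(op, args, write_barrier_applied, newops):
        targetindex = 1 if op in ('copystrcontent', 'copyunicodecontent') else 0
        target = args[targetindex]
        if target not in write_barrier_applied:
            newops.append(('cond_call_gc_wb', target))
            write_barrier_applied[target] = None
        newops.append((op, args))

    newops, wb = [], {}
    handle_string_write('strsetitem', ('p1', 'i2', 'i3'), wb, newops)
    handle_string_write('copystrcontent', ('p1', 'p2', 'i1', 'i2', 'i3'), wb, newops)
    assert ('cond_call_gc_wb', 'p1') in newops
    assert ('cond_call_gc_wb', 'p2') in newops     # barrier on the target string
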
test_rewrite_getfield_gc_on_future_local_after_call(self): # XXX could detect CALLs that cannot interrupt the transaction # and/or could use the L category @@ -484,12 +432,11 @@ jump(p2) """, """ [p1] - cond_call_stm_b(p1, descr=A2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) call(p2, descr=calldescr1) - cond_call_stm_b(p1, descr=A2Vdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, 5, descr=tydescr) - jump(p2) """, calldescr1=calldescr1) @@ -567,9 +514,9 @@ jump() """, """ [p1, i1, p2, p3, i3, p4] - cond_call_stm_b(p1, descr=A2Vdescr) + cond_call_gc_wb(p1, descr=wbdescr) setarrayitem_gc(p1, i1, p2, descr=adescr) - cond_call_stm_b(p3, descr=A2Vdescr) + cond_call_gc_wb(p3, descr=wbdescr) setarrayitem_gc(p3, i3, p4, descr=adescr) jump() @@ -584,7 +531,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_stm_b(p1, descr=A2Vdescr) + cond_call_gc_wb(p1, descr=wbdescr) setarrayitem_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setarrayitem_gc(p1, i3, p3, descr=adescr) @@ -601,7 +548,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_stm_b(p1, descr=A2Wdescr) + cond_call_gc_wb(p1, descr=wbdescr) setinteriorfield_gc(p1, i2, p2, descr=intzdescr) i4 = read_timestamp() setinteriorfield_gc(p1, i3, p3, descr=intzdescr) @@ -617,14 +564,28 @@ jump() """, """ [p1, i2, i3] - cond_call_stm_b(p1, descr=A2Vdescr) + cond_call_gc_wb(p1, descr=wbdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) jump() """) - # py.test.skip("XXX not really right: should instead be an assert " - # "that p1 is already a W") + + def test_rewrite_strsetitem_unicodesetitem_on_fresh_malloc(self): + self.check_rewrite(""" + [i2, i3] + p1 = newstr(i3) + strsetitem(p1, i2, i3) + unicodesetitem(p1, i2, i3) + jump() + """, """ + [i2, i3] + p1 = call_malloc_nursery_varsize(1, 1, i3, descr=strdescr) + setfield_gc(p1, i3, descr=strlendescr) + strsetitem(p1, i2, i3) + unicodesetitem(p1, i2, i3) + jump() + """) def test_rewrite_strgetitem_unicodegetitem(self): self.check_rewrite(""" @@ -634,13 +595,10 @@ jump() """, """ [p1, i2, i3] - cond_call_stm_b(p1, descr=A2Rdescr) i4=strgetitem(p1, i2) i5=unicodegetitem(p1, i2) - jump() """) - def test_call_release_gil(self): T = rffi.CArrayPtr(rffi.TIME_T) From noreply at buildbot.pypy.org Sat Mar 22 12:43:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 12:43:38 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: progress Message-ID: <20140322114338.74E7D1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70164:59318684cd56 Date: 2014-03-22 12:42 +0100 http://bitbucket.org/pypy/pypy/changeset/59318684cd56/ Log: progress diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -72,11 +72,11 @@ return # ---------- setters for pure fields ---------- if opnum in (rop.STRSETITEM, rop.UNICODESETITEM): - self.handle_setters_for_pure_fields(op) + self.handle_setters_for_pure_fields(op, 0) return # ---------- copystrcontent ---------- if opnum in (rop.COPYSTRCONTENT, rop.COPYUNICODECONTENT): - self.handle_copystrcontent(op) + self.handle_setters_for_pure_fields(op, 1) return # ---------- raw getfields and setfields ---------- if opnum in (rop.GETFIELD_RAW, rop.SETFIELD_RAW): @@ -160,8 +160,8 @@ return True return False - def handle_setters_for_pure_fields(self, op): - val = op.getarg(0) + def handle_setters_for_pure_fields(self, op, targetindex): + val = 
op.getarg(targetindex) if self.must_apply_write_barrier(val): self.gen_write_barrier(val) self.newops.append(op) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -612,17 +612,17 @@ jump(i2, p7) """, """ [i1, i2, i3, p7] - cond_call_stm_b(p7, descr=A2Vdescr) + cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 10, descr=tydescr) call_release_gil(123, descr=calldescr2) guard_not_forced() [] - cond_call_stm_b(p7, descr=A2Vdescr) + cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 20, descr=tydescr) jump(i2, p7) """, calldescr2=calldescr2) - + def test_fallback_to_inevitable(self): T = rffi.CArrayPtr(rffi.TIME_T) calldescr2 = get_call_descr(self.gc_ll_descr, [T], rffi.TIME_T) @@ -641,27 +641,39 @@ jump(i2, p7) """ % op, """ [i1, i2, i3, p7] - cond_call_stm_b(p7, descr=A2Vdescr) + cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 10, descr=tydescr) $INEV %s - cond_call_stm_b(p7, descr=A2Vdescr) + cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 20, descr=tydescr) jump(i2, p7) """ % op, calldescr2=calldescr2) - def test_copystrcontent(self): + def test_copystrcontent_new(self): + self.check_rewrite(""" + [p1, i1, i2, i3] + p2 = newstr(i3) + copystrcontent(p1, p2, i1, i2, i3) + jump() + """, """ + [p1, i1, i2, i3] + p2 = call_malloc_nursery_varsize(1, 1, i3, descr=strdescr) + setfield_gc(p2, i3, descr=strlendescr) + copystrcontent(p1, p2, i1, i2, i3) + jump() + """) + + def test_copystrcontent_old(self): self.check_rewrite(""" [p1, p2, i1, i2, i3] copystrcontent(p1, p2, i1, i2, i3) jump() """, """ [p1, p2, i1, i2, i3] - cond_call_stm_b(p2, descr=A2Wdescr) - cond_call_stm_b(p1, descr=A2Rdescr) + cond_call_gc_wb(p2, descr=wbdescr) copystrcontent(p1, p2, i1, i2, i3) - jump() """) From noreply at buildbot.pypy.org Sat Mar 22 12:58:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 12:58:34 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: progress Message-ID: <20140322115834.9028F1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70165:829d58f318f1 Date: 2014-03-22 12:57 +0100 http://bitbucket.org/pypy/pypy/changeset/829d58f318f1/ Log: progress diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -160,9 +160,10 @@ else: raise NotImplementedError(op.getopname()) - def gen_malloc_frame(self, frame_info, frame, size_box): + def gen_malloc_frame(self, frame_info, frame): descrs = self.gc_ll_descr.getframedescrs(self.cpu) - if self.gc_ll_descr.kind == 'boehm' or self.gc_ll_descr.stm: + if self.gc_ll_descr.kind == 'boehm': + size_box = history.BoxInt() op0 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], size_box, descr=descrs.jfi_frame_depth) @@ -170,9 +171,9 @@ op1 = ResOperation(rop.NEW_ARRAY, [size_box], frame, descr=descrs.arraydescr) self.handle_new_array(descrs.arraydescr, op1) - else: + elif not self.gc_ll_descr.stm: # we read size in bytes here, not the length - # jfi_frame_size not set in STM! + size_box = history.BoxInt() op0 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], size_box, descr=descrs.jfi_frame_size) @@ -186,6 +187,17 @@ self.newops.append(op1) self.gen_initialize_len(frame, length_box, descrs.arraydescr.lendescr) + else: + # jfi_frame_size not set in STM! 
+ length_box = history.BoxInt() + op0 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], + length_box, + descr=descrs.jfi_frame_depth) + self.newops.append(op0) + self.gen_malloc_nursery_varsize_frame(length_box, frame) + self.gen_initialize_tid(frame, descrs.arraydescr.tid) + self.gen_initialize_len(frame, length_box, + descrs.arraydescr.lendescr) def handle_call_assembler(self, op): descrs = self.gc_ll_descr.getframedescrs(self.cpu) @@ -193,9 +205,8 @@ assert isinstance(loop_token, history.JitCellToken) jfi = loop_token.compiled_loop_token.frame_info llfi = heaptracker.adr2int(llmemory.cast_ptr_to_adr(jfi)) - size_box = history.BoxInt() frame = history.BoxPtr() - self.gen_malloc_frame(llfi, frame, size_box) + self.gen_malloc_frame(llfi, frame) op2 = ResOperation(rop.SETFIELD_GC, [frame, history.ConstInt(llfi)], None, descr=descrs.jf_frame_info) self.newops.append(op2) @@ -329,7 +340,7 @@ """ self.emitting_an_operation_that_can_collect() op = ResOperation(rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, - [sizebox], + [sizebox], # if STM, this is actually lengthbox! v_result) self.newops.append(op) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -723,32 +723,28 @@ jump(p1) """ % (op, guard), """ [p1] - cond_call_stm_b(p1, descr=A2Vdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, 10, descr=tydescr) %s %s %s - cond_call_stm_b(p1, descr=A2Vdescr) + cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, 20, descr=tydescr) jump(p1) """ % (op, guard, tr_break), calldescr2=calldescr2) def test_call_assembler(self): - py.test.skip("XXX: works, but somehow the test doesn't") - self.check_rewrite(""" [i0, f0] i2 = call_assembler(i0, f0, descr=casmdescr) guard_not_forced()[] """, """ [i0, f0] - i1 = getfield_gc(ConstClass(frame_info), descr=jfi_frame_size) + i1 = getfield_gc(ConstClass(frame_info), descr=jfi_frame_depth) p1 = call_malloc_nursery_varsize_frame(i1) setfield_gc(p1, 0, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) - i2 = getfield_gc(ConstClass(frame_info), descr=jfi_frame_depth) - setfield_gc(p1, i2, descr=framelendescr) + setfield_gc(p1, i1, descr=framelendescr) setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info) setarrayitem_gc(p1, 0, i0, descr=signedframedescr) setarrayitem_gc(p1, 1, f0, descr=floatframedescr) @@ -817,11 +813,8 @@ jump(i1) """) - def test_ptr_eq_other_direct_cases(self): - py.test.skip("can also keep ptr_eq if both args are L or W, " - "or if one arg is freshly malloced") + # ----------- tests copied from rewrite.py ------------- - # ----------- tests copied from rewrite.py ------------- def test_rewrite_assembler_new_to_malloc(self): self.check_rewrite(""" [p1] @@ -830,7 +823,6 @@ [p1] p0 = call_malloc_nursery(%(sdescr.size)d) setfield_gc(p0, 1234, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) """) def test_rewrite_assembler_new3_to_malloc(self): @@ -844,13 +836,10 @@ p0 = call_malloc_nursery( \ %(sdescr.size + tdescr.size + sdescr.size)d) setfield_gc(p0, 1234, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) p1 = int_add(p0, %(sdescr.size)d) setfield_gc(p1, 5678, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) p2 = int_add(p1, %(tdescr.size)d) setfield_gc(p2, 1234, descr=tiddescr) - stm_set_revision_gc(p2, descr=revdescr) """) def test_rewrite_assembler_new_array_fixed_to_malloc(self): @@ -862,7 +851,6 @@ p0 = 
call_malloc_nursery( \ %(adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 4321, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) setfield_gc(p0, 10, descr=alendescr) """) @@ -877,10 +865,8 @@ %(sdescr.size + \ adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 1234, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) p1 = int_add(p0, %(sdescr.size)d) setfield_gc(p1, 4321, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) setfield_gc(p1, 10, descr=alendescr) """) @@ -892,7 +878,6 @@ [] p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) setfield_gc(p0, 8765, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) setfield_gc(p0, 6, descr=blendescr) """) @@ -907,19 +892,15 @@ [] p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) setfield_gc(p0, 8765, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) setfield_gc(p0, 5, descr=blendescr) p1 = int_add(p0, %(bdescr.basesize + 8)d) setfield_gc(p1, 8765, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) setfield_gc(p1, 5, descr=blendescr) p2 = int_add(p1, %(bdescr.basesize + 8)d) setfield_gc(p2, 8765, descr=tiddescr) - stm_set_revision_gc(p2, descr=revdescr) setfield_gc(p2, 5, descr=blendescr) p3 = int_add(p2, %(bdescr.basesize + 8)d) setfield_gc(p3, 8765, descr=tiddescr) - stm_set_revision_gc(p3, descr=revdescr) setfield_gc(p3, 5, descr=blendescr) """) @@ -932,10 +913,8 @@ [] p0 = call_malloc_nursery(%(4*WORD)d) setfield_gc(p0, 9000, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) p1 = int_add(p0, %(2*WORD)d) setfield_gc(p1, 9000, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) """) def test_rewrite_assembler_variable_size(self): @@ -1015,16 +994,13 @@ p0 = call_malloc_nursery( \ %(2 * (bdescr.basesize + 104))d) setfield_gc(p0, 8765, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) setfield_gc(p0, 101, descr=blendescr) p1 = int_add(p0, %(bdescr.basesize + 104)d) setfield_gc(p1, 8765, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) setfield_gc(p1, 102, descr=blendescr) p2 = call_malloc_nursery( \ %(bdescr.basesize + 104)d) setfield_gc(p2, 8765, descr=tiddescr) - stm_set_revision_gc(p2, descr=revdescr) setfield_gc(p2, 103, descr=blendescr) """) @@ -1051,7 +1027,6 @@ [p1] p0 = call_malloc_nursery(104) # rounded up setfield_gc(p0, 9315, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) """) @@ -1080,11 +1055,9 @@ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) setfield_gc(p0, 14, descr=strlendescr) p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) setfield_gc(p1, 10, descr=unicodelendescr) p2 = call_malloc_nursery_varsize(2, 4, i2, \ descr=unicodedescr) @@ -1105,10 +1078,9 @@ p1 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) setfield_gc(p1, 8111, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) setfield_gc(p1, 5, descr=clendescr) label(p1, i2, p3) - cond_call_stm_b(p1, descr=A2Wdescr) + cond_call_gc_wb(p1, descr=wbdescr) setarrayitem_gc(p1, i2, p3, descr=cdescr) """) @@ -1132,11 +1104,9 @@ p0 = call_malloc_nursery( \ %(2 * (bdescr.basesize + 8))d) setfield_gc(p0, 8765, descr=tiddescr) - stm_set_revision_gc(p0, descr=revdescr) setfield_gc(p0, 5, descr=blendescr) p1 = int_add(p0, %(bdescr.basesize + 8)d) 
setfield_gc(p1, 8765, descr=tiddescr) - stm_set_revision_gc(p1, descr=revdescr) setfield_gc(p1, 5, descr=blendescr) stm_transaction_break(1) @@ -1144,7 +1114,6 @@ p2 = call_malloc_nursery( \ %(bdescr.basesize + 8)d) setfield_gc(p2, 8765, descr=tiddescr) - stm_set_revision_gc(p2, descr=revdescr) setfield_gc(p2, 5, descr=blendescr) """, calldescr2=calldescr2) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -865,6 +865,7 @@ self.rm.possibly_free_var(tmp_box) # if gc_ll_descr.stm: + xxxxxx self.assembler.malloc_cond_varsize_frame_stm(sizeloc, gcmap) else: self.assembler.malloc_cond_varsize_frame( From noreply at buildbot.pypy.org Sat Mar 22 13:05:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 13:05:10 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: progress Message-ID: <20140322120510.7207F1C0483@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70166:388415f649a5 Date: 2014-03-22 13:04 +0100 http://bitbucket.org/pypy/pypy/changeset/388415f649a5/ Log: progress diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1133,98 +1133,23 @@ V = lltype.GcArray(('z', lltype.Ptr(S)), hints=imm_hint) vdescr = get_array_descr(self.gc_ll_descr, V) vdescr.tid = 1233 - vzdescr = get_interiorfield_descr(self.gc_ll_descr, V, 'z') + #vzdescr = get_interiorfield_descr(self.gc_ll_descr, V, 'z') - # XXX: "A2Idescr" if imm_hint else "A2Rdescr" - barr = "A2Rdescr" if imm_hint else "A2Rdescr" + if imm_hint: + d = {'comment': '#', 'pure': '_pure'} + else: + d = {'comment': '', 'pure': ''} + self.check_rewrite(""" [p1, p3, i1, p4] - p2 = getfield_gc(p1, descr=uxdescr) - i3 = getinteriorfield_gc(p3, i1, descr=vzdescr) - i4 = getarrayitem_gc(p4, i3, descr=vdescr) + p2 = getfield_gc%(pure)s(p1, descr=uxdescr) + i4 = getarrayitem_gc%(pure)s(p4, i1, descr=vdescr) jump(p2) - """, """ + """ % d, """ [p1, p3, i1, p4] - cond_call_stm_b(p1, descr=%s) - p2 = getfield_gc(p1, descr=uxdescr) - cond_call_stm_b(p3, descr=%s) - i3 = getinteriorfield_gc(p3, i1, descr=vzdescr) - cond_call_stm_b(p4, descr=%s) - i4 = getarrayitem_gc(p4, i3, descr=vdescr) - + p2 = getfield_gc%(pure)s(p1, descr=uxdescr) + %(comment)s stm_read(p1) + i4 = getarrayitem_gc%(pure)s(p4, i1, descr=vdescr) + %(comment)s stm_read(p4) jump(p2) - """ % (barr, barr, barr), uxdescr=uxdescr, - vzdescr=vzdescr, vdescr=vdescr) - - def test_noptr_setfields(self): - S = lltype.GcStruct('S') - U = lltype.GcStruct('U', - ('x', lltype.Signed), - ('y', lltype.Ptr(S))) - udescr = get_size_descr(self.gc_ll_descr, U) - udescr.tid = 2123 - uxdescr = get_field_descr(self.gc_ll_descr, U, 'x') - #uydescr = get_field_descr(self.gc_ll_descr, U, 'y') - - V = lltype.GcArray(('z', lltype.Signed)) - vdescr = get_array_descr(self.gc_ll_descr, V) - vdescr.tid = 1233 - vzdescr = get_interiorfield_descr(self.gc_ll_descr, V, 'z') - - self.check_rewrite(""" - [p1, p3, i1, p4] - setfield_gc(p1, 1, descr=uxdescr) - setinteriorfield_gc(p3, i1, 1, descr=vzdescr) - setarrayitem_gc(p4, i1, 1, descr=vdescr) - jump(p3) - """, """ - [p1, p3, i1, p4] - cond_call_stm_b(p1, descr=A2Vdescr) - setfield_gc(p1, 1, descr=uxdescr) - cond_call_stm_b(p3, descr=A2Vdescr) - setinteriorfield_gc(p3, i1, 1, descr=vzdescr) - cond_call_stm_b(p4, 
descr=A2Vdescr) - setarrayitem_gc(p4, i1, 1, descr=vdescr) - - jump(p3) - """, uxdescr=uxdescr, vzdescr=vzdescr, vdescr=vdescr) - - def test_weaken_previous_barrier(self): - class fakeextrainfo: - oopspecindex=0 - def call_needs_inevitable(self): - return False - T = rffi.CArrayPtr(rffi.TIME_T) - calldescr2 = get_call_descr(self.gc_ll_descr, [T], rffi.TIME_T, - fakeextrainfo()) - - # True: weaken previous barrier - # False: do not weaken - ops = [("stm_transaction_break(1)", False), - ("call(123, descr=cd)", False), - ("label()", False), - ("i2 = int_add(i1, 1)", True) - ] - for op, weaken in ops: - b1 = ("cond_call_stm_b(p1, descr=A2Vdescr)" if weaken - else "cond_call_stm_b(p1, descr=A2Rdescr)") - b2 = ("" if weaken - else "cond_call_stm_b(p1, descr=A2Vdescr)") - self.check_rewrite(""" - [p1, i3] - i1 = getfield_gc(p1, descr=tydescr) # noptr - %s - setfield_gc(p1, i3, descr=tydescr) # noptr - jump(p1) - """ % (op,), """ - [p1, i3] - %s - i1 = getfield_gc(p1, descr=tydescr) - %s - %s - setfield_gc(p1, i3, descr=tydescr) - - jump(p1) - """ % (b1, op, b2), cd=calldescr2) - - + """ % d, uxdescr=uxdescr, vdescr=vdescr) From noreply at buildbot.pypy.org Sat Mar 22 17:18:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 17:18:09 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140322161809.072D91C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70167:813983a8da84 Date: 2014-03-22 16:11 +0100 http://bitbucket.org/pypy/pypy/changeset/813983a8da84/ Log: in-progress diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -79,34 +79,23 @@ else: self.gc_size_of_header = WORD # for tests self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) - if gc_ll_descr.stm: - descrs = [gc_ll_descr.A2Rdescr, gc_ll_descr.Q2Rdescr, - gc_ll_descr.A2Idescr, gc_ll_descr.A2Vdescr, - gc_ll_descr.A2Wdescr, gc_ll_descr.V2Wdescr] - else: - descrs = [gc_ll_descr.write_barrier_descr] - for d in descrs: - self._build_b_slowpath(d, False) - self._build_b_slowpath(d, True) - self._build_b_slowpath(d, False, for_frame=True) # building the barriers needs to happen before these: self._build_failure_recovery(False, withfloats=False) self._build_failure_recovery(True, withfloats=False) + self._build_wb_slowpath(False) + self._build_wb_slowpath(True) + self._build_wb_slowpath(False, for_frame=True) # only for stm: if gc_ll_descr.stm: - self._build_ptr_eq_slowpath() self._build_stm_longjmp_callback() self.stm_transaction_break_path = self._build_stm_transaction_break_path() - else: - self.ptr_eq_slowpath = None # only one of those self.build_frame_realloc_slowpath() if self.cpu.supports_floats: self._build_failure_recovery(False, withfloats=True) self._build_failure_recovery(True, withfloats=True) - for d in descrs: - self._build_b_slowpath(d, False, withfloats=True) - self._build_b_slowpath(d, True, withfloats=True) + self._build_wb_slowpath(False, withfloats=True) + self._build_wb_slowpath(True, withfloats=True) self._build_propagate_exception_path() if gc_ll_descr.get_malloc_slowpath_addr() is not None: @@ -346,12 +335,14 @@ next.prev = prev @staticmethod + @rgc.no_collect def _release_gil_shadowstack(): before = rffi.aroundstate.before if before: before() @staticmethod + @rgc.no_collect def _reacquire_gil_shadowstack(): after = rffi.aroundstate.after if after: diff --git 
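
One detail running through the later test updates: reads of immutable ("pure") fields and array items never get an stm_read, because no other transaction can legally change their value. A one-function summary of that rule, as a rough approximation of what the tests check rather than the real dispatch:

    def needs_stm_read(opname):
        # pure getters and arraylen_gc are safe without a read barrier
        return opname in ('getfield_gc', 'getarrayitem_gc',
                          'getinteriorfield_gc')

    assert needs_stm_read('getfield_gc')
    assert not needs_stm_read('getfield_gc_pure')
    assert not needs_stm_read('getarrayitem_gc_pure')
    assert not needs_stm_read('arraylen_gc')
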
a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -160,6 +160,11 @@ cachedict[fieldname] = fielddescr return fielddescr +def build_stm_tid_field_descr(): + from rpython.rlib import rstm + return FieldDescr('tid', rstm.tid_offset, rffi.sizeof(rstm.TID), + get_type_flag(rstm.TID), False, True) + def get_type_flag(TYPE): if isinstance(TYPE, lltype.Ptr): if TYPE.TO._gckind == 'gc': diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -16,6 +16,7 @@ from rpython.jit.backend.llsupport.descr import GcCache, get_field_descr from rpython.jit.backend.llsupport.descr import get_array_descr from rpython.jit.backend.llsupport.descr import get_call_descr +from rpython.jit.backend.llsupport.descr import build_stm_tid_field_descr from rpython.memory.gctransform import asmgcroot # ____________________________________________________________ @@ -418,12 +419,12 @@ assert self.GCClass.inline_simple_malloc_varsize def _setup_tid(self): + "NOT_RPYTHON" if not self.stm: - self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid') + self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, + 'tid') else: - xxxxxxxx - self.fielddescr_tid = get_field_descr(self, self.GCClass.GCHDR, - 'h_tid') + self.fielddescr_tid = build_stm_tid_field_descr() frame_tid = self.layoutbuilder.get_type_id(jitframe.JITFRAME) self.translator._jit2gc['frame_tid'] = frame_tid diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -39,8 +39,6 @@ if translator and translator.config.translation.gcremovetypeptr: self.vtable_offset = None else: - assert not self.gc_ll_descr.stm, """doesn't work in stm - because it would need a read barrier when reading typeptr""" self.vtable_offset, _ = symbolic.get_field_token(rclass.OBJECT, 'typeptr', translate_support_code) diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -396,12 +396,12 @@ self.reg_bindings[v] = loc return loc - def update_spill_loc_if_necessary(self, var, current_loc): - """if variable var is in two locations (spilled and current_loc), - update spilled location with current_loc""" - spill_loc = self.frame_manager.get(var) - if spill_loc: - self.assembler.regalloc_mov(current_loc, spill_loc) + #def update_spill_loc_if_necessary(self, var, current_loc): + # """if variable var is in two locations (spilled and current_loc), + # update spilled location with current_loc""" + # spill_loc = self.frame_manager.get(var) + # if spill_loc: + # self.assembler.regalloc_mov(current_loc, spill_loc) def _spill_var(self, v, forbidden_vars, selected_reg, need_lower_byte=False): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -256,8 +256,7 @@ mallocs. (For all I know this latter case never occurs in practice, but better safe than sorry.) 
""" - if self.gc_ll_descr.fielddescr_tid is not None \ - or self.gc_ll_descr.stm: # framework GC + if self.gc_ll_descr.fielddescr_tid is not None: assert (size & (WORD-1)) == 0, "size not aligned?" addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -36,8 +36,6 @@ from rpython.rlib.objectmodel import compute_unique_id from rpython.jit.backend.x86 import stmtlocal from rpython.rlib import rstm -from rpython.memory.gc.stmgc import StmGC -from rpython.jit.backend.llsupport.gc import STMBarrierDescr class Assembler386(BaseAssembler): @@ -55,6 +53,7 @@ self.float_const_abs_addr = 0 self.malloc_slowpath = 0 self.malloc_slowpath_varsize = 0 + self.wb_slowpath = [0, 0, 0, 0, 0] self.setup_failure_recovery() self.datablockwrapper = None self.stack_check_slowpath = 0 @@ -423,101 +422,17 @@ rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.stack_check_slowpath = rawstart - - def _build_ptr_eq_slowpath(self): - cpu = self.cpu - assert cpu.gc_ll_descr.stm - # - # SYNCHRONIZE WITH extra.c'S IMPLEMENTATION! - # - # This builds a helper function called from the slow path of - # ptr_eq/ne. It must save all registers, and optionally - # all XMM registers. It takes two values pushed on the stack, - # even on X86_64. It must restore stack alignment accordingly. - mc = codebuf.MachineCodeBlockWrapper() - # - # we want 2 registers: - mc.PUSH_r(esi.value) - mc.PUSH_r(edi.value) - # - # get arguments: ||val2|val1||retaddr|esi||edi| - mc.MOV_rs(esi.value, 3 * WORD) - mc.MOV_rs(edi.value, 4 * WORD) - # - # the fastpath checks if val1==val2 or any of them is NULL - # thus, we only have to get to their h_original - # if they are *not* PREBUILT_ORIGINALS - # - flag = StmGC.GCFLAG_PREBUILT_ORIGINAL - assert (flag >> 32) > 0 and (flag >> 40) == 0 - flag = flag >> 32 - off = 4 - # if !(val1->h_original), leave EDI as is - mc.MOV_rm(X86_64_SCRATCH_REG.value, (edi.value, StmGC.H_ORIGINAL)) - mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) - mc.J_il8(rx86.Conditions['Z'], 0) - z1_location = mc.get_relative_pos() - # if val1->h_tid & PREBUILT_ORIGINAL, take h_original - mc.TEST8_mi((edi.value, StmGC.H_TID + off), flag) - mc.CMOVE_rr(edi.value, X86_64_SCRATCH_REG.value) - # - # Do the same for val2=ESI - offset = mc.get_relative_pos() - z1_location - assert 0 < offset <= 127 - mc.overwrite(z1_location - 1, chr(offset)) - # if !(val2->h_original), leave ESI as is - mc.MOV_rm(X86_64_SCRATCH_REG.value, (esi.value, StmGC.H_ORIGINAL)) - mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) - mc.J_il8(rx86.Conditions['Z'], 0) - z2_location = mc.get_relative_pos() - # if val2->h_tid & PREBUILT_ORIGINAL, take h_original - mc.TEST8_mi((esi.value, StmGC.H_TID + off), flag) - mc.CMOVE_rr(esi.value, X86_64_SCRATCH_REG.value) - # - # COMPARE - offset = mc.get_relative_pos() - z2_location - assert 0 < offset <= 127 - mc.overwrite(z2_location - 1, chr(offset)) - # - mc.CMP_rr(edi.value, esi.value) - sl = X86_64_SCRATCH_REG.lowest8bits() - mc.SET_ir(rx86.Conditions['Z'], sl.value) - # mov result to val2 on stack - # ||val2|val1||retaddr|esi||edi| - mc.MOV_sr(4 * WORD, X86_64_SCRATCH_REG.value) - # - # Restore everything: - mc.POP_r(edi.value) - mc.POP_r(esi.value) - # ||result|val1|retaddr| - # - # - # only remove one arg: - mc.RET16_i(1 * WORD) - - rawstart = 
mc.materialize(self.cpu.asmmemmgr, []) - self.ptr_eq_slowpath = rawstart - - - def _build_b_slowpath(self, descr, withcards, withfloats=False, - for_frame=False): - is_stm = self.cpu.gc_ll_descr.stm + def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): + descr = self.cpu.gc_ll_descr.write_barrier_descr exc0, exc1 = None, None if descr is None: return - - if is_stm and withcards: - return - if not withcards: - func = descr.get_barrier_fn(self.cpu, - returns_modified_object=is_stm) - assert func is not None + func = descr.get_write_barrier_fn(self.cpu) else: - assert not is_stm if descr.jit_wb_cards_set == 0: return - func = descr.get_barrier_from_array_fn(self.cpu) + func = descr.get_write_barrier_from_array_fn(self.cpu) if func == 0: return # @@ -529,53 +444,20 @@ mc = codebuf.MachineCodeBlockWrapper() # if not for_frame: - if descr.stmcat in ['A2W', 'A2V']: - # slow fastpath - # check if PRIV_FROM_PROT is set, but not - # WRITE_BARRIER - mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) - - flag = StmGC.GCFLAG_WRITE_BARRIER >> 32 - off = 4 - assert 0 < flag < 256 - mc.TEST8_mi((X86_64_SCRATCH_REG.value, off), flag) - mc.J_il8(rx86.Conditions['NZ'], 0) - jz1 = mc.get_relative_pos() - # if flag set, jump over the next check & RET - - flag = StmGC.GCFLAG_PRIVATE_FROM_PROTECTED >> 40 - off = 5 - assert 0 < flag < 256 - mc.TEST8_mi((X86_64_SCRATCH_REG.value, off), flag) - mc.J_il8(rx86.Conditions['Z'], 0) - jz2 = mc.get_relative_pos() - # if PRIV_F_PROT, RET - mc.RET() - mc.overwrite(jz2 - 1, chr(mc.get_relative_pos() - jz2)) - mc.overwrite(jz1 - 1, chr(mc.get_relative_pos() - jz1)) - self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) if IS_X86_32: # we have 2 extra words on stack for retval and we pass 1 extra # arg, so we need to substract 2 words - # ||val|retadr| mc.SUB_ri(esp.value, 2 * WORD) - # ||val|retadr|x|x|| mc.MOV_rs(eax.value, 3 * WORD) # 2 + 1 mc.MOV_sr(0, eax.value) - # ||val|retadr|x|val|| else: - # ||val|retadr|| mc.MOV_rs(edi.value, WORD) else: - # ||retadr| # we have one word to align mc.SUB_ri(esp.value, 7 * WORD) # align and reserve some space - # ||retadr|x||x|x||x|x||x|x|| mc.MOV_sr(WORD, eax.value) # save for later - # ||retadr|x||x|x||x|x||rax|x|| mc.MOVSD_sx(3 * WORD, xmm0.value) - # ||retadr|x||x|x||xmm0|x||rax|x|| if IS_X86_32: mc.MOV_sr(4 * WORD, edx.value) mc.MOV_sr(0, ebp.value) @@ -592,63 +474,41 @@ self._store_and_reset_exception(mc, exc0, exc1) mc.CALL(imm(func)) - - if descr.returns_modified_object: - # new addr in eax, save to now unused arg - if for_frame: - # ||retadr|x||x|x||xmm0|x||rax|x|| - # directly move to rbp - mc.MOV_rr(ebp.value, eax.value) - elif IS_X86_32: - mc.MOV_sr(3 * WORD, eax.value) - # ||val|retadr|x|val|| - # -> ||result|retaddr|x|val|| - else: - mc.MOV_sr(WORD, eax.value) - # ||val|retadr|| -> ||result|retadr|| - + # if withcards: # A final TEST8 before the RET, for the caller. Careful to # not follow this instruction with another one that changes # the status of the CPU flags! 
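For reference, the five wb_slowpath entries built here are reached from the usual inline write-barrier check that the backend emits in front of setfield/setarrayitem. A minimal C sketch of what that fast path amounts to on this branch -- the 0x01 flag is the JIT_WB_IF_FLAG constant quoted in rpython/memory/gc/stmgc.py later in this patch, the header layout follows stmgc-c7, and the type and function names are made up; the real code jumps to the register-saving helper built above rather than doing a direct call:

    /* hedged sketch, not code from the patch */
    #include <stdint.h>

    #define JIT_WB_IF_FLAG 0x01      /* _STM_GCFLAG_WRITE_BARRIER, see stmgc.py */

    struct object_s { uint32_t stm_flags; };     /* stmgc-c7 object header */

    typedef void (*wb_fn)(struct object_s *);
    extern wb_fn wb_slowpath[5];   /* index withcards + 2*withfloats; [4] is for_frame */

    static void write_barrier(struct object_s *obj, int withcards, int withfloats)
    {
        if (obj->stm_flags & JIT_WB_IF_FLAG)       /* the TEST8 emitted inline */
            wb_slowpath[withcards + 2 * withfloats](obj);
    }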
- assert not is_stm and not descr.returns_modified_object if IS_X86_32: - mc.MOV_rs(eax.value, 3 * WORD) + mc.MOV_rs(eax.value, 3*WORD) else: mc.MOV_rs(eax.value, WORD) mc.TEST8(addr_add_const(eax, descr.jit_wb_if_flag_byteofs), imm(-0x80)) # + if not for_frame: if IS_X86_32: # ADD touches CPU flags mc.LEA_rs(esp.value, 2 * WORD) self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) - - if descr.returns_modified_object: - # preserve argument which now holds the result - mc.RET() - else: - mc.RET16_i(WORD) + mc.RET16_i(WORD) else: if IS_X86_32: - mc.MOV_rs(edx.value, 5 * WORD) - # ||retadr|x||x|x||xmm0|x||rax|x|| + mc.MOV_rs(edx.value, 4 * WORD) mc.MOVSD_xs(xmm0.value, 3 * WORD) mc.MOV_rs(eax.value, WORD) # restore self._restore_exception(mc, exc0, exc1) mc.MOV(exc0, RawEspLoc(WORD * 5, REF)) mc.MOV(exc1, RawEspLoc(WORD * 6, INT)) - mc.LEA_rs(esp.value, 7 * WORD) - # retval already in ebp mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) if for_frame: - descr.set_b_slowpath(4, rawstart) + self.wb_slowpath[4] = rawstart else: - descr.set_b_slowpath(withcards + 2 * withfloats, rawstart) + self.wb_slowpath[withcards + 2 * withfloats] = rawstart def _build_stm_longjmp_callback(self): @@ -2598,134 +2458,7 @@ offset = mc.get_relative_pos() - jmp2_adr assert 0 < offset <= 127 mc.overwrite(jmp2_adr-1, chr(offset)) - - def malloc_cond_stm(self, size, gcmap): - assert self.cpu.gc_ll_descr.stm - assert size & (WORD-1) == 0 # must be correctly aligned - mc = self.mc - # load nursery_current and nursery_nextlimit - nc = self._get_stm_tl(rstm.get_nursery_current_adr()) - self._tl_segment_if_stm(mc) - mc.MOV_rj(eax.value, nc) - # - mc.LEA_rm(edi.value, (eax.value, size)) - # - # eax=nursery_current, edi=nursery_current+size - self._cond_allocate_in_nursery_or_slowpath(mc, gcmap) - def malloc_cond_varsize_frame_stm(self, sizeloc, gcmap): - assert self.cpu.gc_ll_descr.stm - mc = self.mc - if sizeloc is eax: - self.mc.MOV(edi, sizeloc) - sizeloc = edi - - nc = self._get_stm_tl(rstm.get_nursery_current_adr()) - self._tl_segment_if_stm(mc) - mc.MOV_rj(eax.value, nc) - - if sizeloc is edi: - self.mc.ADD_rr(edi.value, eax.value) - else: - self.mc.LEA_ra(edi.value, (eax.value, sizeloc.value, 0, 0)) - # - # eax=nursery_current, edi=nursery_current+size - self._cond_allocate_in_nursery_or_slowpath(mc, gcmap) - - def malloc_cond_varsize_stm(self, kind, lengthloc, itemsize, - maxlength, gcmap, arraydescr): - assert self.cpu.gc_ll_descr.stm - from rpython.jit.backend.llsupport.descr import ArrayDescr - assert isinstance(arraydescr, ArrayDescr) - - mc = self.mc - nc = self._get_stm_tl(rstm.get_nursery_current_adr()) - nnl = self._get_stm_tl(rstm.get_nursery_nextlimit_adr()) - - # lengthloc is the length of the array, which we must not modify! - assert lengthloc is not eax and lengthloc is not edi - if isinstance(lengthloc, RegLoc): - varsizeloc = lengthloc - else: - mc.MOV(edi, lengthloc) - varsizeloc = edi - - mc.CMP(varsizeloc, imm(maxlength)) - mc.J_il8(rx86.Conditions['A'], 0) # patched later - jmp_adr0 = mc.get_relative_pos() - - self._tl_segment_if_stm(mc) - mc.MOV_rj(eax.value, nc) - - if valid_addressing_size(itemsize): - shift = get_scale(itemsize) - else: - shift = self._imul_const_scaled(mc, edi.value, - varsizeloc.value, itemsize) - varsizeloc = edi - # now varsizeloc is a register != eax. 
The size of - # the variable part of the array is (varsizeloc << shift) - assert arraydescr.basesize >= self.gc_minimal_size_in_nursery - constsize = arraydescr.basesize + self.gc_size_of_header - force_realignment = (itemsize % WORD) != 0 - if force_realignment: - constsize += WORD - 1 - mc.LEA_ra(edi.value, (eax.value, varsizeloc.value, shift, - constsize)) - if force_realignment: - mc.AND_ri(edi.value, ~(WORD - 1)) - # now edi contains the total size in bytes, rounded up to a multiple - # of WORD, plus nursery_free_adr - self._tl_segment_if_stm(mc) - mc.CMP_rj(edi.value, nnl) - mc.J_il8(rx86.Conditions['NA'], 0) # patched later - jmp_adr1 = mc.get_relative_pos() - # - # == SLOWPATH == - offset = mc.get_relative_pos() - jmp_adr0 - assert 0 < offset <= 127 - mc.overwrite(jmp_adr0-1, chr(offset)) - # save the gcmap - self.push_gcmap(mc, gcmap, mov=True) # mov into RawEspLoc(0) - if kind == rewrite.FLAG_ARRAY: - mc.MOV_si(WORD, itemsize) - mc.MOV(edi, lengthloc) - mc.MOV_ri(eax.value, arraydescr.tid) - addr = self.malloc_slowpath_varsize - else: - if kind == rewrite.FLAG_STR: - addr = self.malloc_slowpath_str - else: - assert kind == rewrite.FLAG_UNICODE - addr = self.malloc_slowpath_unicode - mc.MOV(edi, lengthloc) - mc.CALL(imm(addr)) - mc.JMP_l8(0) # jump to done, patched later - jmp_location = mc.get_relative_pos() - # - # == FASTPATH == - offset = mc.get_relative_pos() - jmp_adr1 - assert 0 < offset <= 127 - mc.overwrite(jmp_adr1-1, chr(offset)) - # - # set stm_nursery_current - self._tl_segment_if_stm(mc) - mc.MOV_jr(nc, edi.value) - # - # write down the tid - mc.MOV(mem(eax, 0), imm(arraydescr.tid)) - # also set private_rev_num: - rn = self._get_stm_private_rev_num_addr() - self._tl_segment_if_stm(mc) - mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) - mc.MOV(mem(eax, StmGC.H_REVISION), X86_64_SCRATCH_REG) - # - # == END == - offset = mc.get_relative_pos() - jmp_location - assert 0 < offset <= 127 - mc.overwrite(jmp_location - 1, chr(offset)) - - def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert not self.cpu.gc_ll_descr.stm assert size & (WORD-1) == 0 # must be correctly aligned @@ -2742,15 +2475,20 @@ self.mc.overwrite(jmp_adr-1, chr(offset)) self.mc.MOV(heap(nursery_free_adr), edi) - def malloc_cond_varsize_frame(self, nursery_free_adr, nursery_top_adr, sizeloc, gcmap): - assert not self.cpu.gc_ll_descr.stm + # 'sizeloc' is the size in bytes if not STM; and the length of + # the array to allocate if STM if sizeloc is eax: self.mc.MOV(edi, sizeloc) sizeloc = edi self.mc.MOV(eax, heap(nursery_free_adr)) - if sizeloc is edi: + if self.cpu.gc_ll_descr.stm: + constsize = self.cpu.get_baseofs_of_frame_field() + shift = get_scale(WORD) + self.mc.LEA_ra(edi.value, (eax.value, sizeloc.value, shift, + constsize)) + elif sizeloc is edi: self.mc.ADD_rr(edi.value, eax.value) else: self.mc.LEA_ra(edi.value, (eax.value, sizeloc.value, 0, 0)) @@ -2914,7 +2652,29 @@ self._emit_guard_not_forced(guard_token) - + def genop_discard_stm_read(self, op, arglocs): + assert IS_X86_64, "needed for X86_64_SCRATCH_REG" + mc = self.mc + rm8reg = X86_64_SCRATCH_REG.value | BYTE_REG_FLAG + xxxxxx #load STM_SEGMENT->transaction_read_version into rm8reg + loc_src, loc_tmp = arglocs + if tmp_loc is None: + assert isinstance(loc_src, ImmedLoc) + assert loc_src.value > 0 + mem = loc_src.value >> 4 + assert rx86.fits_in_32bits(mem) + tl_segment_prefix(mc) + mc.MOV8_jr(mem, rm8reg) + else: + assert isinstance(loc_tmp, RegLoc) + if isinstance(loc_src, ImmedLoc): + mc.MOV_ri(loc_tmp.value, 
loc_src.value >> 4) + else: + if loc_tmp is not loc_src: + mc.MOV(loc_tmp, loc_src) + mc.SHR_ri(loc_tmp.value, 4) + tl_segment_prefix(mc) + mc.MOV8_mr((loc_tmp.value, 0), rm8reg) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -864,14 +864,10 @@ gcmap = self.get_gcmap([eax, edi]) # allocate the gcmap *before* self.rm.possibly_free_var(tmp_box) # - if gc_ll_descr.stm: - xxxxxx - self.assembler.malloc_cond_varsize_frame_stm(sizeloc, gcmap) - else: - self.assembler.malloc_cond_varsize_frame( - gc_ll_descr.get_nursery_free_addr(), - gc_ll_descr.get_nursery_top_addr(), - sizeloc, gcmap) + self.assembler.malloc_cond_varsize_frame( + gc_ll_descr.get_nursery_free_addr(), + gc_ll_descr.get_nursery_top_addr(), + sizeloc, gcmap) def consider_call_malloc_nursery_varsize(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr @@ -1369,6 +1365,20 @@ def consider_keepalive(self, op): pass + def consider_stm_read(self, op): + loc_src = self.loc(op.getarg(0)) + self.possibly_free_vars_for_op(op) + # this will get in 'loc_tmp' a register that is the same as + # 'loc_src' if the op.getarg(0) is freed now + if (isinstance(loc_src, ImmedLoc) and + rx86.fits_in_32bits(loc_src.value >> 4)): + loc_tmp = None + else: + tmpxvar = TempBox() + loc_tmp = self.rm.force_allocate_reg(tmpxvar) + self.rm.possibly_free_var(tmpxvar) + self.perform_discard(op, [loc_src, loc_tmp]) + def not_implemented_op(self, op): not_implemented("not implemented operation: %s" % op.getopname()) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -4,6 +4,11 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rlib.jit import dont_look_inside + +TID = rffi.UINT +tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)', default=4) + + @dont_look_inside def get_nursery_current_adr(): addr = llop.stm_get_adr_of_nursery_current(llmemory.Address) From noreply at buildbot.pypy.org Sat Mar 22 17:18:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 17:18:10 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140322161810.5E8BD1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70168:c851d6840147 Date: 2014-03-22 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/c851d6840147/ Log: in-progress diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -564,12 +564,20 @@ self.for_test_only.x = x0 + x1 + x2 + x3 def get_nursery_free_addr(self): - nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) - return rffi.cast(lltype.Signed, nurs_addr) + if self.stm: + from rpython.rlib import rstm + return rstm.adr_nursery_free + else: + nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address) + return rffi.cast(lltype.Signed, nurs_addr) def get_nursery_top_addr(self): - nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) - return rffi.cast(lltype.Signed, nurs_top_addr) + if self.stm: + from rpython.rlib import rstm + return rstm.adr_nursery_top + else: + nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address) + return rffi.cast(lltype.Signed, nurs_top_addr) def initialize(self): pass diff --git a/rpython/jit/backend/x86/assembler.py 
b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -73,7 +73,7 @@ if WORD == 8: self.pending_memoryerror_trampoline_from = [] self.error_trampoline_64 = 0 - self.mc = codebuf.MachineCodeBlockWrapper() + self.mc = codebuf.MachineCodeBlockWrapper(self.cpu) #assert self.datablockwrapper is None --- but obscure case # possible, e.g. getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) @@ -112,7 +112,7 @@ mc.MOV_bi(extra_ofs, value) def build_frame_realloc_slowpath(self): - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats) # this is the gcmap stored by push_gcmap(mov=True) in _check_stack_frame mc.MOV_rs(ecx.value, WORD) @@ -156,7 +156,7 @@ """ This builds a general call slowpath, for whatever call happens to come. """ - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) # copy registers to the frame, with the exception of the # 'cond_call_register_arguments' and eax, because these have already # been saved by the caller. Note that this is not symmetrical: @@ -196,7 +196,7 @@ This function does not have to preserve registers. It expects all registers to be saved in the caller. """ - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) # store the gc pattern ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.MOV_rs(ecx.value, WORD) @@ -250,7 +250,7 @@ This function must preserve all registers apart from eax and edi. """ assert kind in ['fixed', 'str', 'unicode', 'var'] - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) self._push_all_regs_to_frame(mc, [eax, edi], self.cpu.supports_floats) # store the gc pattern ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') @@ -305,14 +305,9 @@ self._reload_frame_if_necessary(mc, align_stack=True) self.set_extra_stack_depth(mc, 0) self._pop_all_regs_from_frame(mc, [eax, edi], self.cpu.supports_floats) - if self.cpu.gc_ll_descr.stm: - # load nursery_current into EDI - nc = self._get_stm_tl(rstm.get_nursery_current_adr()) - self._tl_segment_if_stm(mc) - mc.MOV_rj(edi.value, nc) - else: - nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() - mc.MOV(edi, heap(nursery_free_adr)) # load this in EDI + nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() + mc.SEGC7() + mc.MOV(edi, heap(nursery_free_adr)) # load this in EDI # clear the gc pattern mc.MOV_bi(ofs, 0) mc.RET() @@ -333,7 +328,7 @@ if not self.cpu.propagate_exception_descr: return # not supported (for tests, or non-translated) # - self.mc = codebuf.MachineCodeBlockWrapper() + self.mc = codebuf.MachineCodeBlockWrapper(self.cpu) # # read and reset the current exception @@ -350,22 +345,6 @@ self.propagate_exception_path = rawstart self.mc = None - def _get_stm_tl(self, adr): - """Makes 'adr' relative to threadlocal-base if we run in STM. 
- Before using such a relative address, call _tl_segment_if_stm().""" - if self.cpu.gc_ll_descr.stm and we_are_translated(): - # only for STM and not during tests - result = adr - stmtlocal.threadlocal_base() - assert rx86.fits_in_32bits(result) - return result - return adr - - def _tl_segment_if_stm(self, mc): - """Insert segment prefix for thread-local memory if we run - in STM and not during testing.""" - if self.cpu.gc_ll_descr.stm and we_are_translated(): - stmtlocal.tl_segment_prefix(mc) - def _build_stack_check_slowpath(self): if self.cpu.gc_ll_descr.stm: return # XXX no stack check on STM for now @@ -381,7 +360,7 @@ # | my own retaddr | <-- esp # +---------------------+ # - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) # if IS_X86_64: # on the x86_64, we have to save all the registers that may @@ -402,8 +381,8 @@ else: mc.ADD_ri(esp.value, WORD) # - ea = self._get_stm_tl(self.cpu.pos_exception()) - self._tl_segment_if_stm(mc) + ea = mc.in_tl_segment(self.cpu.pos_exception()) + mc.SEGTL() mc.MOV(eax, heap(ea)) mc.TEST_rr(eax.value, eax.value) mc.J_il8(rx86.Conditions['NZ'], 0) @@ -441,7 +420,7 @@ # all XMM registers. It takes a single argument just pushed # on the stack even on X86_64. It must restore stack alignment # accordingly. - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) # if not for_frame: self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) @@ -518,7 +497,7 @@ # # make the stm_longjmp_callback() function, with signature # void (*longjmp_callback)(void *stm_resume_buffer) - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) # # 'edi' contains the stm resume buffer, so the new stack # location that we have to enforce is 'edi - FRAME_FIXED_SIZE * WORD'. @@ -529,9 +508,9 @@ # # restore the shadowstack pointer from stm_resume_buffer[1] gcrootmap = self.cpu.gc_ll_descr.gcrootmap - rst = self._get_stm_tl(gcrootmap.get_root_stack_top_addr()) + rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE + 1) * WORD) - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV_jr(rst, eax.value) # # must restore 'ebp' from its saved value in the shadowstack @@ -702,7 +681,7 @@ assert rx86.fits_in_32bits(relative_target) # if not tok.is_guard_not_invalidated: - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) mc.writeimm32(relative_target) mc.copy_to_raw_memory(addr) else: @@ -727,7 +706,7 @@ if WORD == 8: for pos_after_jz in self.pending_memoryerror_trampoline_from: assert self.error_trampoline_64 != 0 # only if non-empty - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) mc.writeimm32(self.error_trampoline_64 - pos_after_jz) mc.copy_to_raw_memory(rawstart + pos_after_jz - 4) @@ -786,7 +765,7 @@ self.frame_depth_to_patch.append(ofs2) def _patch_frame_depth(self, adr, allocated_depth): - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) mc.writeimm32(allocated_depth) mc.copy_to_raw_memory(adr) @@ -811,7 +790,7 @@ # that. Otherwise, leave the original rel32 to the recovery stub in # place, but clobber the recovery stub with a jump to the real # target. 
- mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) if rx86.fits_in_32bits(offset): mc.writeimm32(offset) mc.copy_to_raw_memory(adr_jump_offset) @@ -894,7 +873,7 @@ fn = stmtlocal.stm_invalidate_jmp_buf_fn self.mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) # there could have been a collection in invalidate_jmp_buf() - self._reload_frame_if_necessary(self.mc) + self._reload_frame_if_necessary(self.mc, wb=False) # the return value is the jitframe self.mc.MOV_rr(eax.value, ebp.value) @@ -916,9 +895,9 @@ that gives the address of the stack top. If this integer doesn't fit in 32 bits, it will be loaded in r11. """ - rst = self._get_stm_tl(gcrootmap.get_root_stack_top_addr()) + rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) if rx86.fits_in_32bits(rst): - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] else: mc.MOV_ri(X86_64_SCRATCH_REG.value, rst) # MOV r11, rootstacktop @@ -934,9 +913,9 @@ rst = self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp self.mc.ADD_ri(ebx.value, WORD) - + if rx86.fits_in_32bits(rst): - self._tl_segment_if_stm(self.mc) + self.mc.SEGTL() self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: # The integer 'rst' doesn't fit in 32 bits, so we know that @@ -946,9 +925,9 @@ ebx.value) # MOV [r11], ebx def _call_footer_shadowstack(self, gcrootmap): - rst = self._get_stm_tl(gcrootmap.get_root_stack_top_addr()) + rst = self.mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) if rx86.fits_in_32bits(rst): - self._tl_segment_if_stm(self.mc) + self.mc.SEGTL() self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD else: self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop @@ -968,7 +947,7 @@ baseofs = self.cpu.get_baseofs_of_frame_field() newlooptoken.compiled_loop_token.update_frame_info( oldlooptoken.compiled_loop_token, baseofs) - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) mc.JMP(imm(target)) if WORD == 4: # keep in sync with prepare_loop() assert mc.get_relative_pos() == 5 @@ -1215,25 +1194,17 @@ cb = callbuilder.CallBuilder(self, fnloc, arglocs) cb.emit_no_collect() - def _reload_frame_if_necessary(self, mc, align_stack=False): + def _reload_frame_if_necessary(self, mc, align_stack=False, wb=True): gc_ll_descr = self.cpu.gc_ll_descr gcrootmap = gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - rst = self._get_stm_tl(gcrootmap.get_root_stack_top_addr()) - self._tl_segment_if_stm(mc) + rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) + mc.SEGTL() mc.MOV(ecx, heap(rst)) mc.MOV(ebp, mem(ecx, -WORD)) # - if gcrootmap and gcrootmap.is_stm: - if not hasattr(gc_ll_descr, 'A2Wdescr'): - raise Exception("unreachable code") - wbdescr = gc_ll_descr.A2Wdescr - self._stm_barrier_fastpath(mc, wbdescr, [ebp], is_frame=True, - align_stack=align_stack) - return - # wbdescr = gc_ll_descr.write_barrier_descr - if gcrootmap and wbdescr: + if gcrootmap and wbdescr and wb: # frame never uses card marking, so we enforce this is not # an array self._write_barrier_fastpath(mc, wbdescr, [ebp], array=False, @@ -1745,8 +1716,8 @@ def genop_guard_guard_no_exception(self, ign_1, guard_op, guard_token, locs, ign_2): - ea = self._get_stm_tl(self.cpu.pos_exception()) - self._tl_segment_if_stm(self.mc) + ea = self.mc.in_tl_segment(self.cpu.pos_exception()) + self.mc.SEGTL() self.mc.CMP(heap(ea), imm0) self.implement_guard(guard_token, 'NZ') @@ -1760,8 +1731,8 @@ locs, resloc): 
loc = locs[0] loc1 = locs[1] - ea = self._get_stm_tl(self.cpu.pos_exception()) - self._tl_segment_if_stm(self.mc) + ea = self.mc.in_tl_segment(self.cpu.pos_exception()) + self.mc.SEGTL() self.mc.MOV(loc1, heap(ea)) self.mc.CMP(loc1, loc) self.implement_guard(guard_token, 'NE') @@ -1772,42 +1743,43 @@ """ Resest the exception. If excvalloc is None, then store it on the frame in jf_guard_exc """ - eva = self._get_stm_tl(self.cpu.pos_exc_value()) - ea = self._get_stm_tl(self.cpu.pos_exception()) + eva = mc.in_tl_segment(self.cpu.pos_exc_value()) + ea = mc.in_tl_segment(self.cpu.pos_exception()) # - self._tl_segment_if_stm(mc) if excvalloc is not None: assert excvalloc.is_core_reg() + mc.SEGTL() mc.MOV(excvalloc, heap(eva)) elif tmploc is not None: # if both are None, just ignore ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') + mc.SEGTL() mc.MOV(tmploc, heap(eva)) mc.MOV(RawEbpLoc(ofs), tmploc) # if exctploc is not None: assert exctploc.is_core_reg() - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(exctploc, heap(ea)) # - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(heap(ea), imm0) - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(heap(eva), imm0) def _restore_exception(self, mc, excvalloc, exctploc, tmploc=None): - eva = self._get_stm_tl(self.cpu.pos_exc_value()) - ea = self._get_stm_tl(self.cpu.pos_exception()) + eva = mc.in_tl_segment(self.cpu.pos_exc_value()) + ea = mc.in_tl_segment(self.cpu.pos_exception()) if excvalloc is not None: - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(heap(eva), excvalloc) else: assert tmploc is not None ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') mc.MOV(tmploc, RawEbpLoc(ofs)) mc.MOV_bi(ofs, 0) - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(heap(eva), tmploc) - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(heap(ea), exctploc) def _gen_guard_overflow(self, guard_op, guard_token): @@ -2000,20 +1972,20 @@ mc.MOVSD_xb(i, (ofs + i * coeff) * WORD + base_ofs) def _build_failure_recovery(self, exc, withfloats=False): - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self.cpu) self.mc = mc self._push_all_regs_to_frame(mc, [], withfloats) if exc: # We might have an exception pending. Load it into ebx... - eva = self._get_stm_tl(self.cpu.pos_exc_value()) - ea = self._get_stm_tl(self.cpu.pos_exception()) - self._tl_segment_if_stm(mc) + eva = mc.in_tl_segment(self.cpu.pos_exc_value()) + ea = mc.in_tl_segment(self.cpu.pos_exception()) + mc.SEGTL() mc.MOV(ebx, heap(eva)) - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(heap(ea), imm0) - self._tl_segment_if_stm(mc) + mc.SEGTL() mc.MOV(heap(eva), imm0) # ...and save ebx into 'jf_guard_exc' offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc') @@ -2424,46 +2396,13 @@ # XXX if the next operation is a GUARD_NO_EXCEPTION, we should # somehow jump over it too in the fast path - - def _cond_allocate_in_nursery_or_slowpath(self, mc, gcmap): - # needed for slowpath: - # eax = nursery_current - # edi = nursery_current + size - # - # cmp nursery_current+size > nursery_nextlimit - nnl = self._get_stm_tl(rstm.get_nursery_nextlimit_adr()) - self._tl_segment_if_stm(mc) - mc.CMP_rj(edi.value, nnl) - mc.J_il8(rx86.Conditions['NA'], 0) # patched later - jmp_adr = mc.get_relative_pos() - # - # == SLOWPATH == - # save the gcmap - self.push_gcmap(mc, gcmap, mov=True) - mc.CALL(imm(self.malloc_slowpath)) - mc.JMP_l8(0) # XXX: is JMP over 1 instr good? 
- jmp2_adr = mc.get_relative_pos() - # - # == FASTPATH == - offset = mc.get_relative_pos() - jmp_adr - assert 0 < offset <= 127 - mc.overwrite(jmp_adr-1, chr(offset)) - # - # stm_nursery_current = stm_nursery_current+size - nc = self._get_stm_tl(rstm.get_nursery_current_adr()) - self._tl_segment_if_stm(mc) - mc.MOV_jr(nc, edi.value) - # - # END - offset = mc.get_relative_pos() - jmp2_adr - assert 0 < offset <= 127 - mc.overwrite(jmp2_adr-1, chr(offset)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): - assert not self.cpu.gc_ll_descr.stm assert size & (WORD-1) == 0 # must be correctly aligned + self.mc.SEGC7() self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edi.value, (eax.value, size)) + self.mc.SEGC7() self.mc.CMP(edi, heap(nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() @@ -2473,6 +2412,7 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) + self.mc.SEGC7() self.mc.MOV(heap(nursery_free_adr), edi) def malloc_cond_varsize_frame(self, nursery_free_adr, nursery_top_adr, @@ -2482,6 +2422,7 @@ if sizeloc is eax: self.mc.MOV(edi, sizeloc) sizeloc = edi + self.mc.SEGC7() self.mc.MOV(eax, heap(nursery_free_adr)) if self.cpu.gc_ll_descr.stm: constsize = self.cpu.get_baseofs_of_frame_field() @@ -2492,6 +2433,7 @@ self.mc.ADD_rr(edi.value, eax.value) else: self.mc.LEA_ra(edi.value, (eax.value, sizeloc.value, 0, 0)) + self.mc.SEGC7() self.mc.CMP(edi, heap(nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() @@ -2501,6 +2443,7 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) + self.mc.SEGC7() self.mc.MOV(heap(nursery_free_adr), edi) def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, @@ -2522,6 +2465,7 @@ self.mc.J_il8(rx86.Conditions['A'], 0) # patched later jmp_adr0 = self.mc.get_relative_pos() + self.mc.SEGC7() self.mc.MOV(eax, heap(nursery_free_adr)) if valid_addressing_size(itemsize): shift = get_scale(itemsize) @@ -2542,6 +2486,7 @@ self.mc.AND_ri(edi.value, ~(WORD - 1)) # now edi contains the total size in bytes, rounded up to a multiple # of WORD, plus nursery_free_adr + self.mc.SEGC7() self.mc.CMP(edi, heap(nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr1 = self.mc.get_relative_pos() @@ -2573,6 +2518,7 @@ # write down the tid, but not if it's the result of the CALL self.mc.MOV(mem(eax, 0), imm(arraydescr.tid)) # while we're at it, this line is not needed if we've done the CALL + self.mc.SEGC7() self.mc.MOV(heap(nursery_free_adr), edi) # offset = self.mc.get_relative_pos() - jmp_location @@ -2655,16 +2601,18 @@ def genop_discard_stm_read(self, op, arglocs): assert IS_X86_64, "needed for X86_64_SCRATCH_REG" mc = self.mc - rm8reg = X86_64_SCRATCH_REG.value | BYTE_REG_FLAG - xxxxxx #load STM_SEGMENT->transaction_read_version into rm8reg + rmreg = X86_64_SCRATCH_REG.value + mc.SEGC7() + mc.MOVZX8_rj(rmreg, rstm.adr_transaction_read_version) + # loc_src, loc_tmp = arglocs if tmp_loc is None: assert isinstance(loc_src, ImmedLoc) assert loc_src.value > 0 mem = loc_src.value >> 4 assert rx86.fits_in_32bits(mem) - tl_segment_prefix(mc) - mc.MOV8_jr(mem, rm8reg) + mc.SEGC7() + mc.MOV8_jr(mem, rmreg | rx86.BYTE_REG_FLAG) else: assert isinstance(loc_tmp, RegLoc) if isinstance(loc_src, ImmedLoc): @@ -2673,8 +2621,8 @@ if loc_tmp is not loc_src: mc.MOV(loc_tmp, loc_src) 
mc.SHR_ri(loc_tmp.value, 4) - tl_segment_prefix(mc) - mc.MOV8_mr((loc_tmp.value, 0), rm8reg) + mc.SEGC7() + mc.MOV8_mr((loc_tmp.value, 0), rmreg | rx86.BYTE_REG_FLAG) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/rpython/jit/backend/x86/codebuf.py b/rpython/jit/backend/x86/codebuf.py --- a/rpython/jit/backend/x86/codebuf.py +++ b/rpython/jit/backend/x86/codebuf.py @@ -1,5 +1,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import intmask +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_start, debug_print, debug_stop from rpython.rlib.debug import have_debug_prints from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin @@ -21,7 +22,8 @@ class MachineCodeBlockWrapper(BlockBuilderMixin, LocationCodeBuilder, codebuilder_cls): - def __init__(self): + def __init__(self, cpu): + self.stm = cpu.gc_ll_descr.stm self.init_block_builder() # a list of relative positions; for each position p, the bytes # at [p-4:p] encode an absolute address that will need to be @@ -52,3 +54,30 @@ adr[0] = intmask(adr[0] - p) valgrind.discard_translations(addr, self.get_relative_pos()) self._dump(addr, "jit-backend-dump", backend_name) + + def in_tl_segment(self, adr): + """Makes 'adr' relative to threadlocal-base if we run in STM. + Before using such a relative address, call SEGTL().""" + if self.stm and we_are_translated(): + # only for STM and not during tests + from rpython.jit.backend.x86 import stmtlocal, rx86 + result = adr - stmtlocal.threadlocal_base() + assert rx86.fits_in_32bits(result) + return result + return adr + + def SEGTL(self): + """Insert segment prefix for thread-local memory if we run + in STM and not during testing. This is used to access thread-local + data structures like the struct stm_thread_local_s.""" + if self.stm and we_are_translated(): + from rpython.jit.backend.x86 import stmtlocal + stmtlocal.tl_segment_prefix(self) + + def SEGC7(self): + """Insert segment prefix for the stmgc-c7 segment of memory + if we run in STM and not during testing. 
This is used to access + any GC object, or things in the STM_SEGMENT structure.""" + if self.stm and we_are_translated(): + from rpython.jit.backend.x86 import stmtlocal + stmtlocal.c7_segment_prefix(self) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -839,13 +839,10 @@ gcmap = self.get_gcmap([eax, edi]) # allocate the gcmap *before* self.rm.possibly_free_var(tmp_box) # - if gc_ll_descr.stm: - self.assembler.malloc_cond_stm(size, gcmap) - else: - self.assembler.malloc_cond( - gc_ll_descr.get_nursery_free_addr(), - gc_ll_descr.get_nursery_top_addr(), - size, gcmap) + self.assembler.malloc_cond( + gc_ll_descr.get_nursery_free_addr(), + gc_ll_descr.get_nursery_top_addr(), + size, gcmap) def consider_call_malloc_nursery_varsize_frame(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr @@ -893,16 +890,11 @@ # itemsize = op.getarg(1).getint() maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) / itemsize - if gc_ll_descr.stm: - self.assembler.malloc_cond_varsize_stm( - op.getarg(0).getint(), - lengthloc, itemsize, maxlength, gcmap, arraydescr) - else: - self.assembler.malloc_cond_varsize( - op.getarg(0).getint(), - gc_ll_descr.get_nursery_free_addr(), - gc_ll_descr.get_nursery_top_addr(), - lengthloc, itemsize, maxlength, gcmap, arraydescr) + self.assembler.malloc_cond_varsize( + op.getarg(0).getint(), + gc_ll_descr.get_nursery_free_addr(), + gc_ll_descr.get_nursery_top_addr(), + lengthloc, itemsize, maxlength, gcmap, arraydescr) def get_gcmap(self, forbidden_regs=[], noregs=False): frame_depth = self.fm.get_frame_depth() diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -131,7 +131,7 @@ rstm.stop_all_other_threads() for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: - mc = codebuf.MachineCodeBlockWrapper() + mc = codebuf.MachineCodeBlockWrapper(self) mc.JMP_l(tgt) assert mc.get_relative_pos() == 5 # [JMP] [tgt 4 bytes] mc.copy_to_raw_memory(addr - 1) diff --git a/rpython/jit/backend/x86/stmtlocal.py b/rpython/jit/backend/x86/stmtlocal.py --- a/rpython/jit/backend/x86/stmtlocal.py +++ b/rpython/jit/backend/x86/stmtlocal.py @@ -32,6 +32,10 @@ else: mc.writechar('\x64') # %fs: +def c7_segment_prefix(mc): + assert WORD == 8 + mc.writechar('\x65') # %gs: + # special STM functions called directly by the JIT backend stm_should_break_transaction_fn = rffi.llexternal( diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1546,7 +1546,7 @@ _callable=lambda : False) FUNC = lltype.typeOf(self.stm_should_break_transaction).TO - ei = EffectInfo([], [], [], [], + ei = EffectInfo([], [], [], [], [], [], EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION, can_invalidate=False) diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -33,7 +33,9 @@ VISIT_FPTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void)) - minimal_size_in_nursery = llmemory.sizeof(HDR) + JIT_WB_IF_FLAG = 0x01 # value of _STM_GCFLAG_WRITE_BARRIER + stm_fast_alloc = 66*1024 # value of _STM_FAST_ALLOC in stmgc.h + minimal_size_in_nursery = 16 # hard-coded lower limit TRANSLATION_PARAMS = { } @@ -97,7 +99,7 @@ @classmethod def 
JIT_max_size_of_young_obj(cls): - return cls.GC_NURSERY_SECTION + return cls.stm_fast_alloc @classmethod def JIT_minimal_size_in_nursery(cls): diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -773,6 +773,7 @@ v_typeid], resultvar=op.result) def _gc_adr_of_gc_attr(self, hop, attrname): + assert not self.translator.config.translation.stm if getattr(self.gcdata.gc, attrname, None) is None: raise NotImplementedError("gc_adr_of_%s only for generational gcs" % (attrname,)) @@ -780,7 +781,6 @@ ofs = llmemory.offsetof(self.c_const_gc.concretetype.TO, 'inst_' + attrname) c_ofs = rmodel.inputconst(lltype.Signed, ofs) - assert not self.translator.config.translation.stm, "XXX" v_gc_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gc], resulttype=llmemory.Address) hop.genop('adr_add', [v_gc_adr, c_ofs], resultvar=op.result) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -1,4 +1,5 @@ from rpython.rlib.objectmodel import we_are_translated, specialize +from rpython.rlib.objectmodel import CDefinedIntSymbolic from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry @@ -6,39 +7,17 @@ TID = rffi.UINT -tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)', default=4) +tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)') +adr_nursery_free = CDefinedIntSymbolic('(long)(&STM_SEGMENT->nursery_current)') +adr_nursery_top = CDefinedIntSymbolic('(long)(&STM_SEGMENT->nursery_end)') +adr_transaction_read_version = ( + CDefinedIntSymbolic('(long)(&STM_SEGMENT->transaction_read_version)')) - at dont_look_inside -def get_nursery_current_adr(): - addr = llop.stm_get_adr_of_nursery_current(llmemory.Address) - return rffi.cast(lltype.Signed, addr) - - at dont_look_inside -def get_nursery_nextlimit_adr(): - addr = llop.stm_get_adr_of_nursery_nextlimit(llmemory.Address) - return rffi.cast(lltype.Signed, addr) - - at dont_look_inside -def get_active_adr(): - addr = llop.stm_get_adr_of_active(llmemory.Address) - return rffi.cast(lltype.Signed, addr) - - at dont_look_inside -def get_adr_of_private_rev_num(): - addr = llop.stm_get_adr_of_private_rev_num(llmemory.Address) - return rffi.cast(lltype.Signed, addr) - - at dont_look_inside -def get_adr_of_read_barrier_cache(): - addr = llop.stm_get_adr_of_read_barrier_cache(llmemory.Address) - return rffi.cast(lltype.Signed, addr) - def jit_stm_transaction_break_point(): if we_are_translated(): llop.jit_stm_transaction_break_point(lltype.Void) - def jit_stm_should_break_transaction(if_there_is_no_other): # if_there_is_no_other means that we use this point only # if there is no other break point in the trace. 
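The three STM_SEGMENT addresses exported just above (nursery_current, nursery_end, transaction_read_version) are what the SEGC7-prefixed fast paths in assembler.py read and write. Ignoring register allocation and slow-path plumbing, the emitted code corresponds roughly to this C sketch; the struct shows only the fields named in those symbols, all other names are made up, and both memory accesses are really %gs-relative (per-segment), which is exactly what SEGC7() expresses:

    /* hedged sketch, not code from the patch */
    #include <stdint.h>
    #include <stddef.h>

    struct stm_segment_info_s {              /* subset only, types approximate */
        uint8_t  transaction_read_version;
        char    *nursery_current;
        char    *nursery_end;
    };
    extern struct stm_segment_info_s *STM_SEGMENT;  /* really a fixed %gs-relative address */

    /* malloc_cond(): bump allocation of 'size' bytes in the nursery */
    static char *nursery_malloc(size_t size, char *(*slow_path)(size_t))
    {
        char *p = STM_SEGMENT->nursery_current;   /* SEGC7(); MOV eax, [nursery_free] */
        char *q = p + size;                       /* LEA edi, [eax+size]              */
        if (q > STM_SEGMENT->nursery_end)         /* SEGC7(); CMP edi, [nursery_top]  */
            return slow_path(size);               /* CALL malloc_slowpath             */
        STM_SEGMENT->nursery_current = q;         /* SEGC7(); MOV [nursery_free], edi */
        return p;
    }

    /* genop_discard_stm_read(): one read-marker byte per 16 bytes of objects */
    static void read_barrier(uintptr_t obj)
    {
        /* the marker byte, like the field above, is a %gs-relative access */
        *(uint8_t *)(obj >> 4) = STM_SEGMENT->transaction_read_version;
    }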
@@ -47,20 +26,11 @@ return llop.jit_stm_should_break_transaction(lltype.Bool, if_there_is_no_other) - @dont_look_inside def become_inevitable(): llop.stm_become_inevitable(lltype.Void) @dont_look_inside -def stop_all_other_threads(): - llop.stm_stop_all_other_threads(lltype.Void) - - at dont_look_inside -def partial_commit_and_resume_other_threads(): - llop.stm_partial_commit_and_resume_other_threads(lltype.Void) - - at dont_look_inside def should_break_transaction(): return we_are_translated() and ( llop.stm_should_break_transaction(lltype.Bool)) From noreply at buildbot.pypy.org Sat Mar 22 17:47:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 17:47:58 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Try to be more careful: the jit backend must now emit GETFIELD_GC with Message-ID: <20140322164758.56A6E1D253B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70169:d1db1d32b975 Date: 2014-03-22 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/d1db1d32b975/ Log: Try to be more careful: the jit backend must now emit GETFIELD_GC with a SEGC7 prefix, but not GETFIELD_RAW. diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -164,7 +164,7 @@ descrs = self.gc_ll_descr.getframedescrs(self.cpu) if self.gc_ll_descr.kind == 'boehm': size_box = history.BoxInt() - op0 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], + op0 = ResOperation(rop.GETFIELD_RAW,[history.ConstInt(frame_info)], size_box, descr=descrs.jfi_frame_depth) self.newops.append(op0) @@ -174,14 +174,14 @@ elif not self.gc_ll_descr.stm: # we read size in bytes here, not the length size_box = history.BoxInt() - op0 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], + op0 = ResOperation(rop.GETFIELD_RAW,[history.ConstInt(frame_info)], size_box, descr=descrs.jfi_frame_size) self.newops.append(op0) self.gen_malloc_nursery_varsize_frame(size_box, frame) self.gen_initialize_tid(frame, descrs.arraydescr.tid) length_box = history.BoxInt() - op1 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], + op1 = ResOperation(rop.GETFIELD_RAW,[history.ConstInt(frame_info)], length_box, descr=descrs.jfi_frame_depth) self.newops.append(op1) @@ -190,7 +190,7 @@ else: # jfi_frame_size not set in STM! 
length_box = history.BoxInt() - op0 = ResOperation(rop.GETFIELD_GC, [history.ConstInt(frame_info)], + op0 = ResOperation(rop.GETFIELD_RAW,[history.ConstInt(frame_info)], length_box, descr=descrs.jfi_frame_depth) self.newops.append(op0) @@ -407,21 +407,18 @@ val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(1)): self.gen_write_barrier(val) - #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) def handle_write_barrier_setinteriorfield(self, op): val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(2)): self.gen_write_barrier(val) - #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) self.newops.append(op) def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) if self.must_apply_write_barrier(val, op.getarg(2)): self.gen_write_barrier_array(val, op.getarg(1)) - #op = op.copy_and_change(rop.SETARRAYITEM_RAW) self.newops.append(op) def gen_write_barrier(self, v_base): diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -786,10 +786,10 @@ i2 = call_assembler(i0, f0, descr=casmdescr) """, """ [i0, f0] - i1 = getfield_gc(ConstClass(frame_info), descr=jfi_frame_size) + i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_size) p1 = call_malloc_nursery_varsize_frame(i1) setfield_gc(p1, 0, descr=tiddescr) - i2 = getfield_gc(ConstClass(frame_info), descr=jfi_frame_depth) + i2 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth) setfield_gc(p1, i2, descr=framelendescr) setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info) setarrayitem_gc(p1, 0, i0, descr=signedframedescr) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -741,7 +741,7 @@ guard_not_forced()[] """, """ [i0, f0] - i1 = getfield_gc(ConstClass(frame_info), descr=jfi_frame_depth) + i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth) p1 = call_malloc_nursery_varsize_frame(i1) setfield_gc(p1, 0, descr=tiddescr) setfield_gc(p1, i1, descr=framelendescr) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1475,10 +1475,11 @@ # ---------- - def load_from_mem(self, resloc, source_addr, size_loc, sign_loc): + def load_from_mem(self, resloc, source_addr, size_loc, sign_loc, op): assert isinstance(resloc, RegLoc) size = size_loc.value sign = sign_loc.value + self.mc.SEGC7_if_gc(op) if resloc.is_xmm: self.mc.MOVSD(resloc, source_addr) elif size == WORD: @@ -1503,6 +1504,7 @@ def save_into_mem(self, dest_addr, value_loc, size_loc): size = size_loc.value + self.mc.SEGC7_if_gc(op) if isinstance(value_loc, RegLoc) and value_loc.is_xmm: self.mc.MOVSD(dest_addr, value_loc) elif size == 1: @@ -1517,6 +1519,7 @@ else: assert isinstance(value_loc, FloatImmedLoc) self.mc.MOV(dest_addr, value_loc.low_part_loc()) + self.mc.SEGC7_if_gc(op) self.mc.MOV(dest_addr.add_offset(4), value_loc.high_part_loc()) else: not_implemented("save_into_mem size = %d" % size) @@ -1525,7 +1528,7 @@ base_loc, ofs_loc, size_loc, sign_loc = arglocs assert isinstance(size_loc, ImmedLoc) source_addr = AddressLoc(base_loc, ofs_loc) - self.load_from_mem(resloc, source_addr, size_loc, sign_loc) + 
self.load_from_mem(resloc, source_addr, size_loc, sign_loc, op) genop_getfield_raw = genop_getfield_gc genop_getfield_raw_pure = genop_getfield_gc @@ -1537,7 +1540,7 @@ assert isinstance(size_loc, ImmedLoc) scale = get_scale(size_loc.value) src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale) - self.load_from_mem(resloc, src_addr, size_loc, sign_loc) + self.load_from_mem(resloc, src_addr, size_loc, sign_loc, op) genop_getarrayitem_gc_pure = genop_getarrayitem_gc genop_getarrayitem_raw = genop_getarrayitem_gc @@ -1547,7 +1550,7 @@ base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0) - self.load_from_mem(resloc, src_addr, size_loc, sign_loc) + self.load_from_mem(resloc, src_addr, size_loc, sign_loc, op) def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): """Produce one operation to do roughly @@ -1600,7 +1603,7 @@ src_addr = self._get_interiorfield_addr(temp_loc, index_loc, itemsize_loc, base_loc, ofs_loc) - self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) + self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc, op) def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. This should @@ -1614,7 +1617,7 @@ base_loc, ofs_loc, size_loc, value_loc = arglocs assert isinstance(size_loc, ImmedLoc) dest_addr = AddressLoc(base_loc, ofs_loc) - self.save_into_mem(dest_addr, value_loc, size_loc) + self.save_into_mem(dest_addr, value_loc, size_loc, op) def genop_discard_setinteriorfield_gc(self, op, arglocs): (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, @@ -1622,7 +1625,7 @@ dest_addr = self._get_interiorfield_addr(temp_loc, index_loc, itemsize_loc, base_loc, ofs_loc) - self.save_into_mem(dest_addr, value_loc, fieldsize_loc) + self.save_into_mem(dest_addr, value_loc, fieldsize_loc, op) genop_discard_setinteriorfield_raw = genop_discard_setinteriorfield_gc @@ -1632,13 +1635,13 @@ assert isinstance(size_loc, ImmedLoc) scale = get_scale(size_loc.value) dest_addr = AddressLoc(base_loc, ofs_loc, scale, baseofs.value) - self.save_into_mem(dest_addr, value_loc, size_loc) + self.save_into_mem(dest_addr, value_loc, size_loc, op) def genop_discard_raw_store(self, op, arglocs): base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs assert isinstance(baseofs, ImmedLoc) dest_addr = AddressLoc(base_loc, ofs_loc, 0, baseofs.value) - self.save_into_mem(dest_addr, value_loc, size_loc) + self.save_into_mem(dest_addr, value_loc, size_loc, op) def genop_discard_strsetitem(self, op, arglocs): base_loc, ofs_loc, val_loc = arglocs @@ -2013,7 +2016,7 @@ size = WORD * 2 else: size = WORD - self.save_into_mem(raw_stack(base_ofs), return_val, imm(size)) + self.save_into_mem(raw_stack(base_ofs), return_val, imm(size), op) else: [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') diff --git a/rpython/jit/backend/x86/codebuf.py b/rpython/jit/backend/x86/codebuf.py --- a/rpython/jit/backend/x86/codebuf.py +++ b/rpython/jit/backend/x86/codebuf.py @@ -81,3 +81,20 @@ if self.stm and we_are_translated(): from rpython.jit.backend.x86 import stmtlocal stmtlocal.c7_segment_prefix(self) + + def SEGC7_if_gc(self, op): + if self.stm and we_are_translated(): + from rpython.jit.backend.x86 import stmtlocal + from rpython.jit.metainterp.resoperation import rop + # + opnum = op.getopnum() + if opnum in (rop.GETFIELD_GC, + rop.GETFIELD_GC_PURE, + rop.GETARRAYITEM_GC, + rop.GETARRAYITEM_GC_PURE, + rop.GETINTERIORFIELD_GC, + rop.SETFIELD_GC, + 
rop.SETARRAYITEM_GC, + rop.SETINTERIORFIELD_GC, + ): + stmtlocal.c7_segment_prefix(self) From noreply at buildbot.pypy.org Sat Mar 22 18:31:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 18:31:42 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add these temporarily, until we figure out how exactly we need it Message-ID: <20140322173142.E3EAA1C0483@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70170:ca4bf01af754 Date: 2014-03-22 18:29 +0100 http://bitbucket.org/pypy/pypy/changeset/ca4bf01af754/ Log: Add these temporarily, until we figure out how exactly we need it diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -6,6 +6,10 @@ from rpython.rlib.jit import dont_look_inside +def stop_all_other_threads(): "XXX" +def partial_commit_and_resume_other_threads(): "XXX" + + TID = rffi.UINT tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)') adr_nursery_free = CDefinedIntSymbolic('(long)(&STM_SEGMENT->nursery_current)') From noreply at buildbot.pypy.org Sat Mar 22 18:46:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 18:46:07 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140322174607.45A271C073C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70171:03eba8c7dd62 Date: 2014-03-22 18:43 +0100 http://bitbucket.org/pypy/pypy/changeset/03eba8c7dd62/ Log: fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1476,33 +1476,36 @@ # ---------- def load_from_mem(self, resloc, source_addr, size_loc, sign_loc, op): - assert isinstance(resloc, RegLoc) size = size_loc.value sign = sign_loc.value self.mc.SEGC7_if_gc(op) + self.generate_one_mov_with_extension(resloc, source_addr, size, sign) + + def generate_one_mov_with_extension(self, resloc, srcloc, size, sign): + assert isinstance(resloc, RegLoc) if resloc.is_xmm: - self.mc.MOVSD(resloc, source_addr) + self.mc.MOVSD(resloc, srcloc) elif size == WORD: - self.mc.MOV(resloc, source_addr) + self.mc.MOV(resloc, srcloc) elif size == 1: if sign: - self.mc.MOVSX8(resloc, source_addr) + self.mc.MOVSX8(resloc, srcloc) else: - self.mc.MOVZX8(resloc, source_addr) + self.mc.MOVZX8(resloc, srcloc) elif size == 2: if sign: - self.mc.MOVSX16(resloc, source_addr) + self.mc.MOVSX16(resloc, srcloc) else: - self.mc.MOVZX16(resloc, source_addr) + self.mc.MOVZX16(resloc, srcloc) elif IS_X86_64 and size == 4: if sign: - self.mc.MOVSX32(resloc, source_addr) + self.mc.MOVSX32(resloc, srcloc) else: - self.mc.MOV32(resloc, source_addr) # zero-extending + self.mc.MOV32(resloc, srcloc) # zero-extending else: not_implemented("load_from_mem size = %d" % size) - def save_into_mem(self, dest_addr, value_loc, size_loc): + def save_into_mem(self, dest_addr, value_loc, size_loc, op): size = size_loc.value self.mc.SEGC7_if_gc(op) if isinstance(value_loc, RegLoc) and value_loc.is_xmm: @@ -2156,15 +2159,6 @@ def _call_assembler_check_descr(self, value, tmploc): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - - if self.cpu.gc_ll_descr.stm: - # value is non-moving, but jf_descr may have a changed - # descr -> different copy - self._stm_ptr_eq_fastpath(self.mc, [mem(eax, ofs), imm(value)], - tmploc) - self.mc.J_il8(rx86.Conditions['NZ'], 0) - return self.mc.get_relative_pos() - self.mc.CMP(mem(eax, ofs), imm(value)) # patched later 
self.mc.J_il8(rx86.Conditions['E'], 0) # goto B if we get 'done_with_this_frame' @@ -2602,14 +2596,15 @@ self._emit_guard_not_forced(guard_token) def genop_discard_stm_read(self, op, arglocs): - assert IS_X86_64, "needed for X86_64_SCRATCH_REG" + if not IS_X86_64: + todo() # "needed for X86_64_SCRATCH_REG" mc = self.mc rmreg = X86_64_SCRATCH_REG.value mc.SEGC7() mc.MOVZX8_rj(rmreg, rstm.adr_transaction_read_version) # loc_src, loc_tmp = arglocs - if tmp_loc is None: + if loc_tmp is None: assert isinstance(loc_src, ImmedLoc) assert loc_src.value > 0 mem = loc_src.value >> 4 diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -68,7 +68,7 @@ """Overridden in CallBuilder32 and CallBuilder64""" if self.ressize == 0: return # void result - # use the code in load_from_mem to do the zero- or sign-extension + # use the code in self.asm to do the zero- or sign-extension srcloc = self.tmpresloc if srcloc is None: if self.restype == FLOAT: @@ -79,8 +79,8 @@ return # no need for any MOV if self.ressize == 1 and isinstance(srcloc, RegLoc): srcloc = srcloc.lowest8bits() - self.asm.load_from_mem(self.resloc, srcloc, - imm(self.ressize), imm(self.ressign)) + self.asm.generate_one_mov_with_extension(self.resloc, srcloc, + self.ressize, self.ressign) def push_gcmap(self): # we push *now* the gcmap, describing the status of GC registers From noreply at buildbot.pypy.org Sat Mar 22 20:01:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 20:01:19 +0100 (CET) Subject: [pypy-commit] stmgc default: stm_become_globally_unique_transaction() Message-ID: <20140322190119.CA7A21D290B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1082:a5f0a9669efe Date: 2014-03-22 19:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/a5f0a9669efe/ Log: stm_become_globally_unique_transaction() diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -461,7 +461,7 @@ /* force all other threads to be paused. They will unpause automatically when we are done here, i.e. at mutex_unlock(). Important: we should not call cond_wait() in the meantime. 
*/ - synchronize_all_threads(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); /* detect conflicts */ if (detect_write_read_conflicts()) @@ -500,6 +500,8 @@ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* wake up one thread in wait_for_end_of_inevitable_transaction() */ cond_signal(C_INEVITABLE); + if (globally_unique_transaction) + committed_globally_unique_transaction(); } /* done */ @@ -672,3 +674,12 @@ s_mutex_unlock(); } + +void stm_become_globally_unique_transaction(const char *msg) +{ + stm_become_inevitable(msg); /* may still abort */ + + s_mutex_lock(); + synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE); + s_mutex_unlock(); +} diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -68,7 +68,7 @@ } s_mutex_lock(); - synchronize_all_threads(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); mutex_pages_lock(); /* Make a new mmap at some other address, but of the same size as diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -134,7 +134,7 @@ if (is_major_collection_requested()) { /* if still true */ - synchronize_all_threads(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); if (is_major_collection_requested()) { /* if *still* true */ major_collection_now_at_safe_point(); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -319,7 +319,9 @@ /* If we are requested to enter a safe-point, we cannot proceed now. Wait until the safe-point request is removed for us. */ - +#ifdef STM_TESTS + abort_with_mutex(); +#endif cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); @@ -327,7 +329,7 @@ } } -static void synchronize_all_threads(void) +static void synchronize_all_threads(enum sync_type_e sync_type) { enter_safe_point_if_requested(); @@ -335,7 +337,13 @@ why: if several threads call this function, the first one that goes past this point will set the "request safe point" on all other threads; then none of the other threads will go past the - enter_safe_point_if_requested() above. */ + enter_safe_point_if_requested() above. + */ + if (UNLIKELY(globally_unique_transaction)) { + assert(count_other_threads_sp_running() == 0); + return; + } + signal_everybody_to_pause_running(); /* If some other threads are SP_RUNNING, we cannot proceed now. @@ -352,6 +360,13 @@ } } + if (UNLIKELY(sync_type == STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE)) { + globally_unique_transaction = true; + assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + STM_SEGMENT->nursery_end = NURSERY_END; + return; /* don't remove the requests for safe-points in this case */ + } + /* Remove the requests for safe-points now. In principle we should remove it later, when the caller is done, but this is equivalent as long as we hold the mutex. 
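Usage-wise: the new call is made from inside an already-running transaction, it may still abort (it goes through stm_become_inevitable() first), and every other thread stays parked at a safe point until this transaction commits, as the stmgc.h comment below spells out. A minimal C sketch, assuming only the existing stm_commit_transaction() entry point; the function name and message string are invented:

    /* hypothetical caller, not part of the patch */
    #include "stmgc.h"

    static void reorganize_everything(void)
    {
        /* ...a transaction is already running in this thread... */
        stm_become_globally_unique_transaction("reorganize shared state");
        /* like stm_become_inevitable() this may still abort; if it returns,
           no other transaction runs concurrently from here on */
        /* ...heavy-handed, non-parallel work... */
        stm_commit_transaction();   /* other threads resume only after the commit */
    }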
@@ -359,6 +374,15 @@ remove_requests_for_safe_point(); /* => C_REQUEST_REMOVED */ } +static void committed_globally_unique_transaction(void) +{ + assert(globally_unique_transaction); + assert(STM_SEGMENT->nursery_end == NURSERY_END); + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + globally_unique_transaction = false; + remove_requests_for_safe_point(); +} + void _stm_collectable_safe_point(void) { /* If 'nursery_end' was set to NSE_SIGxxx by another thread, diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -29,6 +29,12 @@ static void release_thread_segment(stm_thread_local_t *tl); static void wait_for_end_of_inevitable_transaction(bool can_abort); -static void synchronize_all_threads(void); -static bool pause_signalled; +enum sync_type_e { + STOP_OTHERS_UNTIL_MUTEX_UNLOCK, + STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE, +}; +static void synchronize_all_threads(enum sync_type_e sync_type); +static void committed_globally_unique_transaction(void); + +static bool pause_signalled, globally_unique_transaction; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -325,6 +325,14 @@ void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +/* Similar to stm_become_inevitable(), but additionally suspend all + other threads. A very heavy-handed way to make sure that no other + transaction is running concurrently. Avoid as much as possible. + Other transactions will continue running only after this transaction + commits. */ +void stm_become_globally_unique_transaction(const char *msg); + + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -54,6 +54,7 @@ bool _check_commit_transaction(void); bool _check_abort_transaction(void); bool _check_become_inevitable(void); +bool _check_become_globally_unique_transaction(void); int stm_is_inevitable(void); void _set_type_id(object_t *obj, uint32_t h); @@ -161,6 +162,10 @@ CHECKED(stm_become_inevitable("TEST")); } +bool _check_become_globally_unique_transaction() { + CHECKED(stm_become_globally_unique_transaction("TESTGUT")); +} + #undef CHECKED @@ -357,6 +362,10 @@ if lib._check_become_inevitable(): raise Conflict() +def stm_become_globally_unique_transaction(): + if lib._check_become_globally_unique_transaction(): + raise Conflict() + def stm_minor_collect(): lib.stm_collect(0) @@ -412,6 +421,10 @@ self.current_thread = 0 def teardown_method(self, meth): + tl = self.tls[self.current_thread] + if lib._stm_in_transaction(tl) and lib.stm_is_inevitable(): + self.commit_transaction() # must succeed! + # for n, tl in enumerate(self.tls): if lib._stm_in_transaction(tl): if self.current_thread != n: @@ -420,6 +433,7 @@ self.commit_transaction() # must succeed! 
else: self.abort_transaction() + # for tl in self.tls: lib.stm_unregister_thread_local(tl) lib.stm_teardown() diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py --- a/c7/test/test_extra.py +++ b/c7/test/test_extra.py @@ -80,3 +80,13 @@ self.start_transaction() self.abort_transaction() assert seen == [] + + def test_stm_become_globally_unique_transaction(self): + self.start_transaction() + # + self.switch(1) + self.start_transaction() + lib._check_become_globally_unique_transaction() + assert lib.stm_is_inevitable() + # + py.test.raises(Conflict, self.switch, 0) From noreply at buildbot.pypy.org Sat Mar 22 20:05:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 20:05:45 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/a5f0a9669efe Message-ID: <20140322190545.50CB31D290C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70172:1d78acd6d99b Date: 2014-03-22 20:01 +0100 http://bitbucket.org/pypy/pypy/changeset/1d78acd6d99b/ Log: import stmgc/a5f0a9669efe diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -0e5239ae07f2 +a5f0a9669efe diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -462,7 +462,7 @@ /* force all other threads to be paused. They will unpause automatically when we are done here, i.e. at mutex_unlock(). Important: we should not call cond_wait() in the meantime. */ - synchronize_all_threads(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); /* detect conflicts */ if (detect_write_read_conflicts()) @@ -501,6 +501,8 @@ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* wake up one thread in wait_for_end_of_inevitable_transaction() */ cond_signal(C_INEVITABLE); + if (globally_unique_transaction) + committed_globally_unique_transaction(); } /* done */ @@ -673,3 +675,12 @@ s_mutex_unlock(); } + +void stm_become_globally_unique_transaction(const char *msg) +{ + stm_become_inevitable(msg); /* may still abort */ + + s_mutex_lock(); + synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE); + s_mutex_unlock(); +} diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -69,7 +69,7 @@ } s_mutex_lock(); - synchronize_all_threads(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); mutex_pages_lock(); /* Make a new mmap at some other address, but of the same size as diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -135,7 +135,7 @@ if (is_major_collection_requested()) { /* if still true */ - synchronize_all_threads(); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); if (is_major_collection_requested()) { /* if *still* true */ major_collection_now_at_safe_point(); diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -320,7 +320,9 @@ /* If we are requested to enter a safe-point, we cannot proceed now. 
Wait until the safe-point request is removed for us. */ - +#ifdef STM_TESTS + abort_with_mutex(); +#endif cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); @@ -328,7 +330,7 @@ } } -static void synchronize_all_threads(void) +static void synchronize_all_threads(enum sync_type_e sync_type) { enter_safe_point_if_requested(); @@ -336,7 +338,13 @@ why: if several threads call this function, the first one that goes past this point will set the "request safe point" on all other threads; then none of the other threads will go past the - enter_safe_point_if_requested() above. */ + enter_safe_point_if_requested() above. + */ + if (UNLIKELY(globally_unique_transaction)) { + assert(count_other_threads_sp_running() == 0); + return; + } + signal_everybody_to_pause_running(); /* If some other threads are SP_RUNNING, we cannot proceed now. @@ -353,6 +361,13 @@ } } + if (UNLIKELY(sync_type == STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE)) { + globally_unique_transaction = true; + assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + STM_SEGMENT->nursery_end = NURSERY_END; + return; /* don't remove the requests for safe-points in this case */ + } + /* Remove the requests for safe-points now. In principle we should remove it later, when the caller is done, but this is equivalent as long as we hold the mutex. @@ -360,6 +375,15 @@ remove_requests_for_safe_point(); /* => C_REQUEST_REMOVED */ } +static void committed_globally_unique_transaction(void) +{ + assert(globally_unique_transaction); + assert(STM_SEGMENT->nursery_end == NURSERY_END); + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + globally_unique_transaction = false; + remove_requests_for_safe_point(); +} + void _stm_collectable_safe_point(void) { /* If 'nursery_end' was set to NSE_SIGxxx by another thread, diff --git a/rpython/translator/stm/src_stm/stm/sync.h b/rpython/translator/stm/src_stm/stm/sync.h --- a/rpython/translator/stm/src_stm/stm/sync.h +++ b/rpython/translator/stm/src_stm/stm/sync.h @@ -30,6 +30,12 @@ static void release_thread_segment(stm_thread_local_t *tl); static void wait_for_end_of_inevitable_transaction(bool can_abort); -static void synchronize_all_threads(void); -static bool pause_signalled; +enum sync_type_e { + STOP_OTHERS_UNTIL_MUTEX_UNLOCK, + STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE, +}; +static void synchronize_all_threads(enum sync_type_e sync_type); +static void committed_globally_unique_transaction(void); + +static bool pause_signalled, globally_unique_transaction; diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -326,6 +326,14 @@ void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +/* Similar to stm_become_inevitable(), but additionally suspend all + other threads. A very heavy-handed way to make sure that no other + transaction is running concurrently. Avoid as much as possible. + Other transactions will continue running only after this transaction + commits. 
*/ +void stm_become_globally_unique_transaction(const char *msg); + + /* ==================== END ==================== */ #endif From noreply at buildbot.pypy.org Sat Mar 22 20:05:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 20:05:47 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Use stm_become_globally_unique_transaction here Message-ID: <20140322190547.0BE811D290C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70173:95d7a38c9d56 Date: 2014-03-22 20:03 +0100 http://bitbucket.org/pypy/pypy/changeset/95d7a38c9d56/ Log: Use stm_become_globally_unique_transaction here diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -6,10 +6,6 @@ from rpython.rlib.jit import dont_look_inside -def stop_all_other_threads(): "XXX" -def partial_commit_and_resume_other_threads(): "XXX" - - TID = rffi.UINT tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)') adr_nursery_free = CDefinedIntSymbolic('(long)(&STM_SEGMENT->nursery_current)') @@ -35,6 +31,13 @@ llop.stm_become_inevitable(lltype.Void) @dont_look_inside +def stop_all_other_threads(): + llop.stm_become_globally_unique_transaction(lltype.Void) + +def partial_commit_and_resume_other_threads(): + pass # for now + + at dont_look_inside def should_break_transaction(): return we_are_translated() and ( llop.stm_should_break_transaction(lltype.Bool)) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -426,6 +426,7 @@ 'stm_identityhash': LLOp(canfold=True), 'stm_addr_get_tid': LLOp(canfold=True), 'stm_become_inevitable': LLOp(canmallocgc=True), + 'stm_become_globally_unique_transaction': LLOp(canmallocgc=True), 'stm_push_root': LLOp(), 'stm_pop_root_into': LLOp(), 'stm_commit_if_not_atomic': LLOp(canmallocgc=True), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -129,6 +129,9 @@ string_literal = c_string_constant(info) return 'stm_become_inevitable(%s);' % (string_literal,) +def stm_become_globally_unique_transaction(funcgen, op): + return 'stm_become_globally_unique_transaction("for the JIT");' + def stm_push_root(funcgen, op): arg0 = funcgen.expr(op.args[0]) return 'STM_PUSH_ROOT(stm_thread_local, %s);' % (arg0,) From noreply at buildbot.pypy.org Sat Mar 22 20:13:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 20:13:04 +0100 (CET) Subject: [pypy-commit] stmgc default: Test stm_become_globally_unique_transaction here, it fails Message-ID: <20140322191304.6D3541C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1083:badf9ae726c2 Date: 2014-03-22 20:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/badf9ae726c2/ Log: Test stm_become_globally_unique_transaction here, it fails diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -288,6 +288,11 @@ stm_become_inevitable("please"); pop_roots(); return NULL; + } else if (get_rand(360) == 1) { + fprintf(stdout, "GUT"); + push_roots(); + stm_become_globally_unique_transaction("really"); + pop_roots(); } return p; } From noreply at buildbot.pypy.org Sat Mar 22 20:20:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 20:20:54 +0100 (CET) Subject: [pypy-commit] stmgc default: Can't sleep if 
we're inevitable! Message-ID: <20140322192054.9DC171C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1084:378758606915 Date: 2014-03-22 20:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/378758606915/ Log: Can't sleep if we're inevitable! diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -130,6 +130,7 @@ else if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { assert(contmgr.other_pseg->transaction_state != TS_INEVITABLE); contmgr.abort_other = true; + contmgr.try_sleep = false; } else if (contmgr.other_pseg->transaction_state == TS_INEVITABLE) { contmgr.abort_other = false; From noreply at buildbot.pypy.org Sat Mar 22 20:43:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 20:43:57 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix the test (don't expect variables like 'arg' to remain sensible Message-ID: <20140322194357.CFFB01C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1085:a33130d9f35c Date: 2014-03-22 20:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/a33130d9f35c/ Log: Fix the test (don't expect variables like 'arg' to remain sensible across a setjmp! They need to be volatile) diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -288,11 +288,12 @@ stm_become_inevitable("please"); pop_roots(); return NULL; - } else if (get_rand(360) == 1) { - fprintf(stdout, "GUT"); + } else if (get_rand(240) == 1) { push_roots(); stm_become_globally_unique_transaction("really"); + fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num); pop_roots(); + return NULL; } return p; } @@ -324,8 +325,9 @@ setup_thread(); - objptr_t p = NULL; + objptr_t p; stm_jmpbuf_t here; + volatile int call_fork = (arg != NULL); STM_START_TRANSACTION(&stm_thread_local, here); assert(td.num_roots >= td.num_roots_at_transaction_start); @@ -345,7 +347,7 @@ if (p == (objptr_t)-1) { push_roots(); - if (arg == NULL) { /* common case */ + if (call_fork == 0) { /* common case */ stm_commit_transaction(); td.num_roots_at_transaction_start = td.num_roots; if (get_rand(100) < 98) { @@ -361,7 +363,7 @@ else { /* run a fork() inside the transaction */ printf("========== FORK =========\n"); - arg = NULL; + call_fork = 0; pid_t child = fork(); printf("=== in process %d thread %lx, fork() returned %d\n", (int)getpid(), (long)pthread_self(), (int)child); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -191,9 +191,9 @@ STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; + enter_safe_point_if_requested(); dprintf(("start_transaction\n")); - enter_safe_point_if_requested(); s_mutex_unlock(); /* Now running the SP_RUNNING start. 
We can set our From noreply at buildbot.pypy.org Sat Mar 22 20:45:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Mar 2014 20:45:39 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/a33130d9f35c Message-ID: <20140322194539.EFB441C0483@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70174:12e6d39bdeea Date: 2014-03-22 20:43 +0100 http://bitbucket.org/pypy/pypy/changeset/12e6d39bdeea/ Log: import stmgc/a33130d9f35c diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -a5f0a9669efe +a33130d9f35c diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -131,6 +131,7 @@ else if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { assert(contmgr.other_pseg->transaction_state != TS_INEVITABLE); contmgr.abort_other = true; + contmgr.try_sleep = false; } else if (contmgr.other_pseg->transaction_state == TS_INEVITABLE) { contmgr.abort_other = false; diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -192,9 +192,9 @@ STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; + enter_safe_point_if_requested(); dprintf(("start_transaction\n")); - enter_safe_point_if_requested(); s_mutex_unlock(); /* Now running the SP_RUNNING start. We can set our From noreply at buildbot.pypy.org Sat Mar 22 23:10:31 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 22 Mar 2014 23:10:31 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: clean up win32 test failures Message-ID: <20140322221031.217171C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70175:ed924db837d6 Date: 2014-03-22 23:13 +0200 http://bitbucket.org/pypy/pypy/changeset/ed924db837d6/ Log: clean up win32 test failures From noreply at buildbot.pypy.org Sat Mar 22 23:10:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 22 Mar 2014 23:10:32 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: add MSVC macros Message-ID: <20140322221032.660E91C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70176:d75ef8250857 Date: 2014-03-22 23:14 +0200 http://bitbucket.org/pypy/pypy/changeset/d75ef8250857/ Log: add MSVC macros diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -20,10 +20,11 @@ def detect_model_from_c_compiler(): # based on http://sourceforge.net/p/predef/wiki/Architectures/ + # and http://msdn.microsoft.com/en-us/library/b0084kay.aspx mapping = { - MODEL_X86_64: ['__amd64__', '__amd64', '__x86_64__', '__x86_64'], - MODEL_ARM: ['__arm__', '__thumb__'], - MODEL_X86: ['i386', '__i386', '__i386__', '__i686__'], + MODEL_X86_64: ['__amd64__', '__amd64', '__x86_64__', '__x86_64', '_M_X64', '_M_AMD64'], + MODEL_ARM: ['__arm__', '__thumb__','_M_ARM_EP'], + MODEL_X86: ['i386', '__i386', '__i386__', '__i686__','_M_IX86'], MODEL_PPC_64: ['__powerpc64__'], } for k, v in mapping.iteritems(): From noreply at buildbot.pypy.org Sat Mar 22 23:10:33 2014 From: noreply 
at buildbot.pypy.org (mattip) Date: Sat, 22 Mar 2014 23:10:33 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: unicode on win32 is 2 bytes Message-ID: <20140322221033.970661C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70177:c900402751c9 Date: 2014-03-22 23:21 +0200 http://bitbucket.org/pypy/pypy/changeset/c900402751c9/ Log: unicode on win32 is 2 bytes diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -528,6 +528,11 @@ """) def test_rewrite_assembler_newstr_newunicode(self): + import sys + if sys.platform == 'win32': + unicode_size = 2 + else: + unicode_size = 4 self.check_rewrite(""" [i2] p0 = newstr(14) @@ -538,21 +543,21 @@ """, """ [i2] p0 = call_malloc_nursery( \ - %(strdescr.basesize + 16 * strdescr.itemsize + \ + %%(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) - setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, %%(strdescr.tid)d, descr=tiddescr) setfield_gc(p0, 14, descr=strlendescr) - p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) - setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) + p1 = int_add(p0, %%(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %%(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) - p2 = call_malloc_nursery_varsize(2, 4, i2, \ + p2 = call_malloc_nursery_varsize(2, %d, i2, \ descr=unicodedescr) setfield_gc(p2, i2, descr=unicodelendescr) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) setfield_gc(p3, i2, descr=strlendescr) jump() - """) + """ % unicode_size) def test_write_barrier_before_setfield_gc(self): self.check_rewrite(""" From noreply at buildbot.pypy.org Sat Mar 22 23:10:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 22 Mar 2014 23:10:34 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: fix compilation, only to discover testing gil release on extern function call fails Message-ID: <20140322221034.ED1A21C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70178:c1c9972e5a4e Date: 2014-03-22 23:42 +0200 http://bitbucket.org/pypy/pypy/changeset/c1c9972e5a4e/ Log: fix compilation, only to discover testing gil release on extern function call fails diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,6 +14,7 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float from rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo class BasicTests: @@ -3228,11 +3229,12 @@ self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): + eci = ExternalCompilationInfo() if sys.platform == "win32": - py.test.skip("needs 'time'") + eci = ExternalCompilationInfo(libraries=["msvcrt"]) T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, compilation_info=eci) # Not a real lock, has all the same properties with respect to GIL # release though, so good for this test. 
class Lock(object): @@ -3920,10 +3922,13 @@ self.interp_operations(f, []) def test_external_call(self): + eci = ExternalCompilationInfo() + if sys.platform == "win32": + eci = ExternalCompilationInfo(libraries=["msvcrt"]) from rpython.rlib.objectmodel import invoke_around_extcall T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T) + external = rffi.llexternal("time", [T], rffi.TIME_T, compilation_info=eci) class Oups(Exception): pass From noreply at buildbot.pypy.org Sat Mar 22 23:10:36 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 22 Mar 2014 23:10:36 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: ansi C Message-ID: <20140322221036.349A81C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70179:feb18385325c Date: 2014-03-22 23:56 +0200 http://bitbucket.org/pypy/pypy/changeset/feb18385325c/ Log: ansi C diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -312,8 +312,9 @@ ("get_flags", "METH_NOARGS", """ PyCompilerFlags flags; + int result; flags.cf_flags = 0; - int result = PyEval_MergeCompilerFlags(&flags); + result = PyEval_MergeCompilerFlags(&flags); return Py_BuildValue("ii", result, flags.cf_flags); """), ]) From noreply at buildbot.pypy.org Sat Mar 22 23:10:37 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 22 Mar 2014 23:10:37 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: cpython compatibility: did this test ever pass with -A ? Message-ID: <20140322221037.7F2E21C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70180:0c785cb5b007 Date: 2014-03-23 00:08 +0200 http://bitbucket.org/pypy/pypy/changeset/0c785cb5b007/ Log: cpython compatibility: did this test ever pass with -A ? diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -695,7 +695,7 @@ if sys.platform != 'win32': return assert u'test'.encode('mbcs') == 'test' - assert u'caf\xe9'.encode('mbcs') == 'caf\xe9' + assert u'caf\xe9'.encode('mbcs') == '?' assert u'\u040a'.encode('mbcs') == '?' 
# some cyrillic letter assert 'cafx\e9'.decode('mbcs') == u'cafx\e9' From noreply at buildbot.pypy.org Sun Mar 23 08:14:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 08:14:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Silence warnings Message-ID: <20140323071421.C645A1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70181:bd87e99d44f6 Date: 2014-03-23 08:13 +0100 http://bitbucket.org/pypy/pypy/changeset/bd87e99d44f6/ Log: Silence warnings diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -911,7 +911,7 @@ void pypy_stm_register_thread_local(void) { stm_register_thread_local(&stm_thread_local); - stm_thread_local.mem_clear_on_abort = &pypy_g_ExcData; + stm_thread_local.mem_clear_on_abort = (char *)&pypy_g_ExcData; stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData); } ''' diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -95,7 +95,8 @@ stm_jmpbuf_t jmpbuf; long volatile v_counter = 0; #ifndef NDEBUG - object_t **volatile old_shadowstack = stm_thread_local.shadowstack; + struct stm_shadowentry_s *volatile old_shadowstack = + stm_thread_local.shadowstack; #endif STM_PUSH_ROOT(stm_thread_local, arg); From noreply at buildbot.pypy.org Sun Mar 23 08:24:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 08:24:55 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Un-delete this test (see 53a2a9ecde3e) and make it pass Message-ID: <20140323072455.6D9EB1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70182:52f5f6d0153b Date: 2014-03-23 08:24 +0100 http://bitbucket.org/pypy/pypy/changeset/52f5f6d0153b/ Log: Un-delete this test (see 53a2a9ecde3e) and make it pass diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -91,6 +91,12 @@ hop.genop("stm_write", [v_struct]) hop.rename('bare_' + opname) + def gct_gc_writebarrier(self, hop): + v_struct = hop.spaceop.args[0] + assert var_needsgc(v_struct), ("gc_writebarrier: the argument is %r" + % v_struct.concretetype) + hop.genop("stm_write", [v_struct]) + def gc_header_for(self, obj, needs_hash=False): return self.gcdata.gc.gcheaderbuilder.header_of_object(obj) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -382,6 +382,23 @@ assert match assert int(match.group(1)) < 20 + def test_gc_writebarrier(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + prebuilt = lltype.malloc(X, immortal=True) + prebuilt.foo = 42 + + def main(argv): + llop.gc_writebarrier(lltype.Void, prebuilt) + debug_print(objectmodel.current_object_addr_as_int(prebuilt)) + prebuilt.foo = 43 + debug_print(objectmodel.current_object_addr_as_int(prebuilt)) + return 0 + + t, cbuilder = self.compile(main) + data, dataerr = cbuilder.cmdexec('', err=True) + lines = dataerr.split('\n') + assert lines[0] == lines[1] + def test_dtoa(self): def main(argv): a = len(argv) * 0.2 From noreply at buildbot.pypy.org Sun Mar 23 08:38:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 
2014 08:38:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Next fix Message-ID: <20140323073820.69E2A1C3396@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70183:0b83cbb912b2 Date: 2014-03-23 08:37 +0100 http://bitbucket.org/pypy/pypy/changeset/0b83cbb912b2/ Log: Next fix diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -6,6 +6,7 @@ from rpython.memory.gctypelayout import WEAKREF, WEAKREFPTR from rpython.rtyper import rmodel, llannotation from rpython.translator.backendopt.support import var_needsgc +from rpython.rlib import rstm class StmFrameworkGCTransformer(BaseFrameworkGCTransformer): @@ -103,6 +104,12 @@ def gct_gc_adr_of_root_stack_top(self, hop): hop.genop("stm_get_root_stack_top", [], resultvar=hop.spaceop.result) + def gct_get_write_barrier_failing_case(self, hop): + op = hop.spaceop + c_write_slowpath = rmodel.inputconst( + lltype.Signed, rstm.adr_write_slowpath) + hop.genop("cast_int_to_ptr", [c_write_slowpath], resultvar=op.result) + ## def _gct_with_roots_pushed(self, hop): ## livevars = self.push_roots(hop) ## self.default(hop) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -12,6 +12,7 @@ adr_nursery_top = CDefinedIntSymbolic('(long)(&STM_SEGMENT->nursery_end)') adr_transaction_read_version = ( CDefinedIntSymbolic('(long)(&STM_SEGMENT->transaction_read_version)')) +adr_write_slowpath = CDefinedIntSymbolic('(long)(&_stm_write_slowpath)') def jit_stm_transaction_break_point(): diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -392,6 +392,7 @@ debug_print(objectmodel.current_object_addr_as_int(prebuilt)) prebuilt.foo = 43 debug_print(objectmodel.current_object_addr_as_int(prebuilt)) + llop.get_write_barrier_failing_case(rffi.VOIDP) return 0 t, cbuilder = self.compile(main) From noreply at buildbot.pypy.org Sun Mar 23 08:52:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 08:52:23 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Test and fix Message-ID: <20140323075223.38E201C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70184:b76622eef525 Date: 2014-03-23 08:51 +0100 http://bitbucket.org/pypy/pypy/changeset/b76622eef525/ Log: Test and fix diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -727,7 +727,7 @@ self.pop_roots(hop, livevars) def gct_gc_can_move(self, hop): - assert not self.translator.config.translation.stm, "XXX" + assert not self.translator.config.translation.stm op = hop.spaceop v_addr = hop.genop('cast_ptr_to_adr', [op.args[0]], resulttype=llmemory.Address) @@ -738,7 +738,7 @@ if self.shrink_array_ptr is None: return GCTransformer.gct_shrink_array(self, hop) op = hop.spaceop - assert not self.translator.config.translation.stm, "XXX" + assert not self.translator.config.translation.stm v_addr = hop.genop('cast_ptr_to_adr', [op.args[0]], resulttype=llmemory.Address) v_length = op.args[1] @@ -795,7 +795,7 @@ ofs = llmemory.offsetof(self.c_const_gcdata.concretetype.TO, 'inst_' + attrname) c_ofs = rmodel.inputconst(lltype.Signed, ofs) - assert not 
self.translator.config.translation.stm, "XXX" + assert not self.translator.config.translation.stm v_gcdata_adr = hop.genop('cast_ptr_to_adr', [self.c_const_gcdata], resulttype=llmemory.Address) hop.genop('adr_add', [v_gcdata_adr, c_ofs], resultvar=op.result) diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -110,6 +110,9 @@ lltype.Signed, rstm.adr_write_slowpath) hop.genop("cast_int_to_ptr", [c_write_slowpath], resultvar=op.result) + def gct_gc_can_move(self, hop): + hop.rename('stm_can_move') + ## def _gct_with_roots_pushed(self, hop): ## livevars = self.push_roots(hop) ## self.default(hop) diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -64,7 +64,7 @@ def stm_can_move(funcgen, op): arg0 = funcgen.expr(op.args[0]) result = funcgen.expr(op.result) - return '%s = stm_can_move(%s);' % (result, arg0) + return '%s = stm_can_move((object_t *)%s);' % (result, arg0) def stm_allocate_tid(funcgen, op): arg_size = funcgen.expr(op.args[0]) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -382,7 +382,7 @@ assert match assert int(match.group(1)) < 20 - def test_gc_writebarrier(self): + def test_gc_writebarrier_and_misc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) prebuilt = lltype.malloc(X, immortal=True) prebuilt.foo = 42 @@ -393,6 +393,7 @@ prebuilt.foo = 43 debug_print(objectmodel.current_object_addr_as_int(prebuilt)) llop.get_write_barrier_failing_case(rffi.VOIDP) + assert llop.gc_can_move(lltype.Bool, prebuilt) == False return 0 t, cbuilder = self.compile(main) From noreply at buildbot.pypy.org Sun Mar 23 09:03:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 09:03:57 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Test and fix Message-ID: <20140323080357.8080F1C3396@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70185:2b49d01603f9 Date: 2014-03-23 09:03 +0100 http://bitbucket.org/pypy/pypy/changeset/2b49d01603f9/ Log: Test and fix diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -425,6 +425,7 @@ 'stm_id': LLOp(sideeffects=False), 'stm_identityhash': LLOp(canfold=True), 'stm_addr_get_tid': LLOp(canfold=True), + 'stm_get_root_stack_top': LLOp(sideeffects=False), 'stm_become_inevitable': LLOp(canmallocgc=True), 'stm_become_globally_unique_transaction': LLOp(canmallocgc=True), 'stm_push_root': LLOp(), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -211,6 +211,11 @@ def stm_ignored_stop(funcgen, op): return '/* stm_ignored_stop */' +def stm_get_root_stack_top(funcgen, op): + result = funcgen.expr(op.result) + return '%s = (%s)&stm_thread_local.shadowstack;' % ( + result, cdecl(funcgen.lltypename(op.result), '')) + ##def stm_initialize(funcgen, op): ## return '''stm_initialize(); @@ -306,11 +311,6 @@ ## result = funcgen.expr(op.result) ## return '%s = (%s)&stm_active;' % ( ## result, cdecl(funcgen.lltypename(op.result), '')) - -##def 
stm_get_root_stack_top(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = (%s)&stm_shadowstack;' % ( -## result, cdecl(funcgen.lltypename(op.result), '')) ##def stm_get_adr_of_private_rev_num(funcgen, op): ## result = funcgen.expr(op.result) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -1,7 +1,7 @@ import py from rpython.rlib import rstm, rgc, objectmodel from rpython.rlib.debug import debug_print -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.lltypesystem.rclass import OBJECTPTR from rpython.rtyper.lltypesystem.lloperation import llop from rpython.translator.stm.test.support import CompiledSTMTests @@ -393,6 +393,7 @@ prebuilt.foo = 43 debug_print(objectmodel.current_object_addr_as_int(prebuilt)) llop.get_write_barrier_failing_case(rffi.VOIDP) + llop.gc_adr_of_root_stack_top(llmemory.Address) assert llop.gc_can_move(lltype.Bool, prebuilt) == False return 0 From noreply at buildbot.pypy.org Sun Mar 23 09:23:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 09:23:24 +0100 (CET) Subject: [pypy-commit] pypy default: The old interface box.getaddr() should now only be used to cast a raw Message-ID: <20140323082324.4350A1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70186:69395dff1d5c Date: 2014-03-23 09:21 +0100 http://bitbucket.org/pypy/pypy/changeset/69395dff1d5c/ Log: The old interface box.getaddr() should now only be used to cast a raw pointer to an address, not a GC pointer. diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -108,6 +108,7 @@ raise NotImplementedError def getaddr(self): + "Only for raw addresses (BoxInt & ConstInt), not for GC addresses" raise NotImplementedError def sort_key(self): @@ -321,9 +322,6 @@ else: return 0 - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -494,9 +492,6 @@ return lltype.cast_opaque_ptr(PTR, self.getref_base()) getref._annspecialcase_ = 'specialize:arg(1)' - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def _get_hash_(self): if self.value: return lltype.identityhash(self.value) From noreply at buildbot.pypy.org Sun Mar 23 09:23:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 09:23:25 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: The old interface box.getaddr() should now only be used to cast a Message-ID: <20140323082325.9B49B1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70187:dc3260a800db Date: 2014-03-23 09:21 +0100 http://bitbucket.org/pypy/pypy/changeset/dc3260a800db/ Log: The old interface box.getaddr() should now only be used to cast a raw pointer to an address, not a GC pointer. 
diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -108,6 +108,7 @@ raise NotImplementedError def getaddr(self): + "Only for raw addresses (BoxInt & ConstInt), not for GC addresses" raise NotImplementedError def sort_key(self): @@ -321,9 +322,6 @@ else: return 0 - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -494,9 +492,6 @@ return lltype.cast_opaque_ptr(PTR, self.getref_base()) getref._annspecialcase_ = 'specialize:arg(1)' - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def _get_hash_(self): if self.value: return lltype.identityhash(self.value) From noreply at buildbot.pypy.org Sun Mar 23 09:29:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 09:29:34 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix (see existing test) Message-ID: <20140323082934.0B9AE1C33B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70188:b06faf7b2607 Date: 2014-03-23 09:29 +0100 http://bitbucket.org/pypy/pypy/changeset/b06faf7b2607/ Log: Fix (see existing test) diff --git a/rpython/rlib/atomic_ops.py b/rpython/rlib/atomic_ops.py --- a/rpython/rlib/atomic_ops.py +++ b/rpython/rlib/atomic_ops.py @@ -2,20 +2,14 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo - -cdir = py.path.local(__file__).join('..', '..', 'translator', 'stm') -cdir2 = py.path.local(__file__).join('..', '..', 'translator', 'c') - eci = ExternalCompilationInfo( - include_dirs = [cdir, cdir2], post_include_bits = [''' -#include "src_stm/atomic_ops.h" #define pypy_bool_cas(ptr, old, _new) \\ - bool_cas((volatile unsigned long*)(ptr), \\ + __sync_bool_compare_and_swap((volatile unsigned long*)(ptr), \\ (unsigned long)(old), \\ (unsigned long)(_new)) #define pypy_fetch_and_add(ptr, value) \\ - fetch_and_add((volatile unsigned long*)(ptr), \\ + __sync_fetch_and_add((volatile unsigned long*)(ptr), \\ (unsigned long)(value)) '''], ) From noreply at buildbot.pypy.org Sun Mar 23 10:15:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 10:15:40 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add a test Message-ID: <20140323091540.D006E1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70189:55b694971c42 Date: 2014-03-23 10:02 +0100 http://bitbucket.org/pypy/pypy/changeset/55b694971c42/ Log: Add a test diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py --- a/rpython/translator/stm/test/test_readbarrier.py +++ b/rpython/translator/stm/test/test_readbarrier.py @@ -36,3 +36,15 @@ res = self.interpret(f1, []) assert res == 42 assert self.read_barriers == [] + + def test_getarrayitem(self): + X = lltype.GcArray(lltype.Signed) + x1 = lltype.malloc(X, 5, immortal=True, zero=True) + x1[2] = 42 + + def f1(n): + return x1[n] + + res = self.interpret(f1, [2]) + assert res == 42 + assert self.read_barriers == [x1] From noreply at buildbot.pypy.org Sun Mar 23 10:15:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 10:15:42 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Test and fix Message-ID: <20140323091542.7607A1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70190:c2aa0c41cac7 Date: 2014-03-23 10:15 
+0100 http://bitbucket.org/pypy/pypy/changeset/c2aa0c41cac7/ Log: Test and fix diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -699,25 +699,31 @@ #address operations def OP_RAW_STORE(self, op): + tlprefix, char = '', 'char' + if (self._is_stm() and isinstance(op.args[0].concretetype, Ptr) + and op.args[0].concretetype.TO._gckind == 'gc'): + tlprefix, char = ' TLPREFIX ', 'rpygcchar_t' addr = self.expr(op.args[0]) offset = self.expr(op.args[1]) value = self.expr(op.args[2]) TYPE = op.args[2].concretetype - typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '') - return ( - '((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0] = %(value)s;' - % locals()) + typename = cdecl(self.db.gettype(TYPE).replace('@', tlprefix+'*@'), '') + return ('((%(typename)s) (((%(char)s *)%(addr)s)' + ' + %(offset)s))[0] = %(value)s;' % locals()) OP_BARE_RAW_STORE = OP_RAW_STORE def OP_RAW_LOAD(self, op): + tlprefix, char = '', 'char' + if (self._is_stm() and isinstance(op.args[0].concretetype, Ptr) + and op.args[0].concretetype.TO._gckind == 'gc'): + tlprefix, char = ' TLPREFIX ', 'rpygcchar_t' addr = self.expr(op.args[0]) offset = self.expr(op.args[1]) result = self.expr(op.result) TYPE = op.result.concretetype - typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '') - return ( - "%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];" - % locals()) + typename = cdecl(self.db.gettype(TYPE).replace('@', tlprefix+'*@'), '') + return ('%(result)s = ((%(typename)s)' + ' (((%(char)s *)%(addr)s) + %(offset)s))[0];' % locals()) def OP_CAST_PRIMITIVE(self, op): TYPE = self.lltypemap(op.result) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -473,3 +473,22 @@ t, cbuilder = self.compile(main) data = cbuilder.cmdexec('') assert '< 42 >\n< 84 >\n' in data + + def test_raw_load_store_on_gc(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + prebuilt = lltype.malloc(X, immortal=True) + prebuilt.foo = 42 + ofs_foo = llmemory.offsetof(X, 'foo') + + def main(argv): + p = lltype.cast_opaque_ptr(llmemory.GCREF, prebuilt) + llop.raw_store(lltype.Void, p, ofs_foo, -84) + print prebuilt.foo + prebuilt.foo = -1298 + print llop.raw_load(lltype.Signed, p, ofs_foo) + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert '-84\n' in data + assert '-1298\n' in data From noreply at buildbot.pypy.org Sun Mar 23 12:16:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 12:16:25 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress: temporarily disable some stm helpers Message-ID: <20140323111625.7F5A31C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70191:2b3d2341edde Date: 2014-03-23 11:55 +0100 http://bitbucket.org/pypy/pypy/changeset/2b3d2341edde/ Log: in-progress: temporarily disable some stm helpers diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -35,7 +35,7 @@ from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import compute_unique_id from rpython.jit.backend.x86 import stmtlocal -from rpython.rlib import rstm +from rpython.rlib import rstm, nonconst class 
Assembler386(BaseAssembler): @@ -210,8 +210,11 @@ # (rsp + FRAME_FIXED_SIZE + RET_ADDR + ALIGNMENT) mc.LEA_rs(edi.value, FRAME_FIXED_SIZE * WORD + WORD + (16-WORD)) mc.MOV(esi, imm(self.stm_longjmp_callback_addr)) - fn = stmtlocal.stm_transaction_break_fn - mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + + # XXX UD2 + #fn = stmtlocal.stm_transaction_break_fn + #mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + # self._reload_frame_if_necessary(mc) # @@ -870,13 +873,16 @@ assert IS_X86_64 # load the address of the STM_RESUME_BUF self.mc.LEA_rs(edi.value, FRAME_FIXED_SIZE * WORD) - fn = stmtlocal.stm_invalidate_jmp_buf_fn - self.mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + + # XXX UD2 + #fn = stmtlocal.stm_invalidate_jmp_buf_fn + #self.mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + # there could have been a collection in invalidate_jmp_buf() - self._reload_frame_if_necessary(self.mc, wb=False) - - # the return value is the jitframe - self.mc.MOV_rr(eax.value, ebp.value) + # but _call_footer_shadowstack() will reload the frame + else: + # the return value is the jitframe + self.mc.MOV_rr(eax.value, ebp.value) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -926,7 +932,15 @@ def _call_footer_shadowstack(self, gcrootmap): rst = self.mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) - if rx86.fits_in_32bits(rst): + if self.cpu.gc_ll_descr.stm and we_are_translated(): + assert rx86.fits_in_32bits(rst) + self.mc.SEGTL() + self.mc.MOV_rj(ebx.value, rst) + self.mc.LEA_rm(ebx.value, (ebx.value, -WORD)) + self.mc.MOV_rm(eax.value, (ebx.value, 0)) + self.mc.SEGTL() + self.mc.MOV_jr(rst, ebx.value) + elif rx86.fits_in_32bits(rst): self.mc.SEGTL() self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD else: @@ -1194,7 +1208,7 @@ cb = callbuilder.CallBuilder(self, fnloc, arglocs) cb.emit_no_collect() - def _reload_frame_if_necessary(self, mc, align_stack=False, wb=True): + def _reload_frame_if_necessary(self, mc, align_stack=False): gc_ll_descr = self.cpu.gc_ll_descr gcrootmap = gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -1204,7 +1218,7 @@ mc.MOV(ebp, mem(ecx, -WORD)) # wbdescr = gc_ll_descr.write_barrier_descr - if gcrootmap and wbdescr and wb: + if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not # an array self._write_barrier_fastpath(mc, wbdescr, [ebp], array=False, @@ -2204,7 +2218,6 @@ # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed # and call the function remember_young_pointer() from the GC. 
- assert not self.cpu.gc_ll_descr.stm if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) @@ -2247,12 +2260,12 @@ helper_num = 4 elif self._regalloc is not None and self._regalloc.xrm.reg_bindings: helper_num += 2 - if descr.get_b_slowpath(helper_num) == 0: # tests only + if descr.get_wb_slowpath(helper_num) == 0: # tests only assert not we_are_translated() self.cpu.gc_ll_descr.write_barrier_descr = descr self._build_b_slowpath(descr, card_marking, bool(self._regalloc.xrm.reg_bindings)) - assert descr.get_b_slowpath(helper_num) != 0 + assert descr.get_wb_slowpath(helper_num) != 0 # if not is_frame: mc.PUSH(loc_base) @@ -2547,8 +2560,14 @@ mc = self.mc # if stm_should_break_transaction() - fn = stmtlocal.stm_should_break_transaction_fn - mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + + + # XXX UD2 + #fn = stmtlocal.stm_should_break_transaction_fn + #mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + mc.MOV(eax, imm(0)) + + mc.TEST8(eax.lowest8bits(), eax.lowest8bits()) mc.J_il(rx86.Conditions['Z'], 0xfffff) # patched later jz_location2 = mc.get_relative_pos() From noreply at buildbot.pypy.org Sun Mar 23 12:16:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 12:16:27 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Revert a little bit more code handling multiple write-barriers Message-ID: <20140323111627.2C8621C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70192:55ef5dced4ff Date: 2014-03-23 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/55ef5dced4ff/ Log: Revert a little bit more code handling multiple write-barriers diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -37,15 +37,6 @@ random_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(STUFF, immortal=True)) -class WBDescrForTests(AbstractDescr): - returns_modified_object = False - b_slowpath = (0, 0, 0, 0) - def get_b_slowpath(self, c1): - return self.b_slowpath[c1] - def set_b_slowpath(self, c1, addr): - self.b_slowpath = (self.b_slowpath[:c1] + (addr,) + - self.b_slowpath[c1+1:]) - class Runner(object): @@ -2133,12 +2124,11 @@ FUNC = self.FuncType([lltype.Ptr(S)], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr(WBDescrForTests): + class WriteBarrierDescr(AbstractDescr): jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 - def get_barrier_fn(self, cpu, returns_modified_object): - assert self.returns_modified_object == returns_modified_object + def get_write_barrier_fn(self, cpu): return funcbox.getint() # for cond in [False, True]: @@ -2159,58 +2149,6 @@ else: assert record == [] - def test_cond_call_gc_wb_stm_returns_modified_object(self): - py.test.skip("XXX rethink this test") - def func_void(a): - record.append(a) - return t - record = [] - # - S = lltype.GcStruct('S', ('tid', lltype.Signed)) - FUNC = self.FuncType([lltype.Ptr(S)], lltype.Ptr(S)) - func_ptr = llhelper(lltype.Ptr(FUNC), func_void) - funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr(WBDescrForTests): - returns_modified_object = True - jit_wb_if_flag = 4096 - jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') - jit_wb_if_flag_singlebyte = 0x10 - def get_barrier_fn(self, cpu, returns_modified_object): - 
assert self.returns_modified_object == returns_modified_object - return funcbox.getint() - # - for cond in [False, True]: - value = random.randrange(-sys.maxint, sys.maxint) - if cond: - value |= 4096 - else: - value &= ~4096 - s = lltype.malloc(S) - s.tid = value - sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - t = lltype.malloc(S) - tgcref = lltype.cast_opaque_ptr(llmemory.GCREF, t) - del record[:] - p0 = BoxPtr() - operations = [ - ResOperation(rop.COND_CALL_GC_WB, [p0], None, - descr=WriteBarrierDescr()), - ResOperation(rop.FINISH, [p0], None, descr=BasicFinalDescr(4)) - ] - inputargs = [p0] - looptoken = JitCellToken() - self.cpu.compile_loop(None, inputargs, operations, looptoken) - deadframe = self.cpu.execute_token(looptoken, sgcref) - fail = self.cpu.get_latest_descr(deadframe) - assert fail.identifier == 4 - res = self.cpu.get_ref_value(deadframe, 0) - if cond: - assert record == [s] - assert res == tgcref - else: - assert record == [] - assert res == sgcref - def test_cond_call_gc_wb_array(self): def func_void(a): record.append(rffi.cast(lltype.Signed, a)) @@ -2220,16 +2158,13 @@ FUNC = self.FuncType([lltype.Ptr(S)], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr(WBDescrForTests): + class WriteBarrierDescr(AbstractDescr): jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 jit_wb_cards_set = 0 # <= without card marking - def get_barrier_fn(self, cpu, returns_modified_object): - assert self.returns_modified_object == returns_modified_object + def get_write_barrier_fn(self, cpu): return funcbox.getint() - def get_barrier_from_array_fn(self, cpu): - return 0 # for cond in [False, True]: value = random.randrange(-sys.maxint, sys.maxint) @@ -2270,7 +2205,7 @@ FUNC = self.FuncType([lltype.Ptr(S)], lltype.Void) func_ptr = llhelper(lltype.Ptr(FUNC), func_void) funcbox = self.get_funcbox(self.cpu, func_ptr) - class WriteBarrierDescr(WBDescrForTests): + class WriteBarrierDescr(AbstractDescr): jit_wb_if_flag = 4096 jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10') jit_wb_if_flag_singlebyte = 0x10 @@ -2278,7 +2213,7 @@ jit_wb_cards_set_byteofs = struct.pack("i", 32768).index('\x80') jit_wb_cards_set_singlebyte = -0x80 jit_wb_card_page_shift = 7 - def get_barrier_from_array_fn(self, cpu): + def get_write_barrier_from_array_fn(self, cpu): return funcbox.getint() # for BoxIndexCls in [BoxInt, ConstInt]*3: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -562,8 +562,8 @@ self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - operations = regalloc.prepare_loop(inputargs, operations, looptoken, - clt.allgcrefs) + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -660,8 +660,7 @@ name = "Bridge # %s" % (descr_number,) self.cpu.profile_agent.native_code_written(name, rawstart, fullsize) - return AsmInfo(ops_offset, startpos + rawstart, - codeendpos - startpos) + return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) def write_pending_failure_recoveries(self): # for each pending guard, generate the code of the recovery stub @@ -879,14 +878,23 @@ #self.mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) # there could have been 
a collection in invalidate_jmp_buf() - # but _call_footer_shadowstack() will reload the frame + # reload the frame into eax, while at the same time popping + # it off the shadowstack + rst = self.mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) + assert rx86.fits_in_32bits(rst) + self.mc.SEGTL() + self.mc.MOV_rj(ebx.value, rst) + self.mc.LEA_rm(ebx.value, (ebx.value, -WORD)) + self.mc.MOV_rm(eax.value, (ebx.value, 0)) + self.mc.SEGTL() + self.mc.MOV_jr(rst, ebx.value) else: # the return value is the jitframe self.mc.MOV_rr(eax.value, ebp.value) - - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - if gcrootmap and gcrootmap.is_shadow_stack: - self._call_footer_shadowstack(gcrootmap) + # + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.MOV_rs(self.cpu.CALLEE_SAVE_REGISTERS[i].value, @@ -932,15 +940,7 @@ def _call_footer_shadowstack(self, gcrootmap): rst = self.mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) - if self.cpu.gc_ll_descr.stm and we_are_translated(): - assert rx86.fits_in_32bits(rst) - self.mc.SEGTL() - self.mc.MOV_rj(ebx.value, rst) - self.mc.LEA_rm(ebx.value, (ebx.value, -WORD)) - self.mc.MOV_rm(eax.value, (ebx.value, 0)) - self.mc.SEGTL() - self.mc.MOV_jr(rst, ebx.value) - elif rx86.fits_in_32bits(rst): + if rx86.fits_in_32bits(rst): self.mc.SEGTL() self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD else: @@ -1209,15 +1209,15 @@ cb.emit_no_collect() def _reload_frame_if_necessary(self, mc, align_stack=False): - gc_ll_descr = self.cpu.gc_ll_descr - gcrootmap = gc_ll_descr.gcrootmap - if gcrootmap and gcrootmap.is_shadow_stack: - rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) - mc.SEGTL() - mc.MOV(ecx, heap(rst)) - mc.MOV(ebp, mem(ecx, -WORD)) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + if gcrootmap.is_shadow_stack: + rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) + mc.SEGTL() + mc.MOV(ecx, heap(rst)) + mc.MOV(ebp, mem(ecx, -WORD)) # - wbdescr = gc_ll_descr.write_barrier_descr + wbdescr = self.cpu.gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not # an array @@ -1281,9 +1281,6 @@ genop_guard_float_gt = _cmpop_guard_float("A", "B", "BE","AE") genop_guard_float_ge = _cmpop_guard_float("AE","BE", "B", "A") - - - def genop_math_sqrt(self, op, arglocs, resloc): self.mc.SQRTSD(arglocs[0], resloc) @@ -1831,15 +1828,7 @@ genop_guard_guard_isnull = genop_guard_guard_false def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2): - argtype = guard_op.getarg(0).type - if self.cpu.gc_ll_descr.stm and argtype == REF: - assert guard_op.getarg(1).type == REF - # x64 has no support for 64bit immed. Force them into registers! 
- # XXX: do better for 32 bit - self.genop_guard_ptr_eq(ign_1, guard_op, guard_token, - locs, ign_2) - return - elif argtype == FLOAT: + if guard_op.getarg(0).type == FLOAT: assert guard_op.getarg(1).type == FLOAT self.mc.UCOMISD(locs[0], locs[1]) else: @@ -2260,18 +2249,18 @@ helper_num = 4 elif self._regalloc is not None and self._regalloc.xrm.reg_bindings: helper_num += 2 - if descr.get_wb_slowpath(helper_num) == 0: # tests only + if self.wb_slowpath[helper_num] == 0: # tests only assert not we_are_translated() self.cpu.gc_ll_descr.write_barrier_descr = descr - self._build_b_slowpath(descr, card_marking, - bool(self._regalloc.xrm.reg_bindings)) - assert descr.get_wb_slowpath(helper_num) != 0 + self._build_wb_slowpath(card_marking, + bool(self._regalloc.xrm.reg_bindings)) + assert self.wb_slowpath[helper_num] != 0 # if not is_frame: mc.PUSH(loc_base) if is_frame and align_stack: mc.SUB_ri(esp.value, 16 - WORD) # erase the return address - mc.CALL(imm(descr.get_b_slowpath(helper_num))) + mc.CALL(imm(self.wb_slowpath[helper_num])) if is_frame and align_stack: mc.ADD_ri(esp.value, 16 - WORD) # erase the return address @@ -2459,7 +2448,6 @@ def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, lengthloc, itemsize, maxlength, gcmap, arraydescr): - assert not self.cpu.gc_ll_descr.stm from rpython.jit.backend.llsupport.descr import ArrayDescr assert isinstance(arraydescr, ArrayDescr) From noreply at buildbot.pypy.org Sun Mar 23 12:16:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 12:16:28 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140323111628.740131C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70193:dba83bbc67c6 Date: 2014-03-23 12:15 +0100 http://bitbucket.org/pypy/pypy/changeset/dba83bbc67c6/ Log: fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -866,6 +866,7 @@ # def _call_footer(self): + gcrootmap = self.cpu.gc_ll_descr.gcrootmap if self.cpu.gc_ll_descr.stm and we_are_translated(): # call stm_invalidate_jmp_buf(), in case we called # stm_transaction_break() earlier @@ -892,7 +893,6 @@ # the return value is the jitframe self.mc.MOV_rr(eax.value, ebp.value) # - gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) From noreply at buildbot.pypy.org Sun Mar 23 12:23:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 12:23:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: clang emits a warning when casting from ptr to "char TLPREFIX *". Message-ID: <20140323112321.8A7711C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70194:d968c3d4d7f1 Date: 2014-03-23 12:22 +0100 http://bitbucket.org/pypy/pypy/changeset/d968c3d4d7f1/ Log: clang emits a warning when casting from ptr to "char TLPREFIX *". 
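A minimal sketch of what the one-line primitive.py change below amounts to: the generated C expression for a static GC reference is now wrapped in an explicit cast to the TLPREFIX-qualified char type, so clang stops warning about the implicit address-space conversion. The helper name and sample symbol here are invented for illustration; only the ((rpygcchar_t *)...) wrapping comes from the diff.

def emit_static_gcref(c_expr):
    # wrap the expression in an explicit cast instead of relying on an
    # implicit conversion to the TLPREFIX'd "char *" type
    return '((rpygcchar_t *)%s)' % (c_expr,)

assert emit_static_gcref('pypy_g_sample') == '((rpygcchar_t *)pypy_g_sample)'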
diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -189,7 +189,7 @@ if isinstance(realobj, int): return _name_tagged(realobj, db) realvalue = cast_opaque_ptr(Ptr(typeOf(realobj)), value) - return db.get(realvalue, static=static) + return '((rpygcchar_t *)%s)' % (db.get(realvalue, static=static),) else: return 'NULL' From noreply at buildbot.pypy.org Sun Mar 23 13:42:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 13:42:47 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: simplify Message-ID: <20140323124247.B3FEA1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70195:6cab230db021 Date: 2014-03-23 12:41 +0100 http://bitbucket.org/pypy/pypy/changeset/6cab230db021/ Log: simplify diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -885,7 +885,7 @@ assert rx86.fits_in_32bits(rst) self.mc.SEGTL() self.mc.MOV_rj(ebx.value, rst) - self.mc.LEA_rm(ebx.value, (ebx.value, -WORD)) + self.mc.SUB_ri(ebx.value, -WORD) self.mc.MOV_rm(eax.value, (ebx.value, 0)) self.mc.SEGTL() self.mc.MOV_jr(rst, ebx.value) From noreply at buildbot.pypy.org Sun Mar 23 13:42:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 13:42:49 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: In-progress: add the %fs or %gs segment prefix in the core of Message-ID: <20140323124249.4CF711C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70196:0a1abc01bb9f Date: 2014-03-23 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/0a1abc01bb9f/ Log: In-progress: add the %fs or %gs segment prefix in the core of the assembler production. We'll have to fix all places that use memory references, which is the point here because we'll need a segment prefix for any GC pointer, with stm. 
diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -49,6 +49,10 @@ def fits_in_32bits(value): return -2147483648 <= value <= 2147483647 +SEGMENT_NO = '\x00' +SEGMENT_FS = '\x64' +SEGMENT_GS = '\x65' + # ____________________________________________________________ # Emit a single char @@ -146,7 +150,7 @@ # Emit a mod/rm referencing a stack location [EBP+offset] @specialize.arg(2) -def encode_stack_bp(mc, offset, force_32bits, orbyte): +def encode_stack_bp(mc, (segment, offset), force_32bits, orbyte): if not force_32bits and single_byte(offset): mc.writechar(chr(0x40 | orbyte | R.ebp)) mc.writeimm8(offset) @@ -155,8 +159,12 @@ mc.writeimm32(offset) return 0 +def rex_stack_bp(mc, (segment, offset), _): + mc.write_segment_prefix(segment) + return 0 + def stack_bp(argnum, force_32bits=False): - return encode_stack_bp, argnum, force_32bits, None + return encode_stack_bp, argnum, force_32bits, rex_stack_bp # ____________________________________________________________ # Emit a mod/rm referencing a stack location [ESP+offset] @@ -182,7 +190,7 @@ # ____________________________________________________________ # Emit a mod/rm referencing a memory location [reg1+offset] -def encode_mem_reg_plus_const(mc, (reg, offset), _, orbyte): +def encode_mem_reg_plus_const(mc, (segment, reg, offset), _, orbyte): assert reg != R.esp and reg != R.ebp # reg1 = reg_number_3bits(mc, reg) @@ -209,7 +217,8 @@ mc.writeimm32(offset) return 0 -def rex_mem_reg_plus_const(mc, (reg, offset), _): +def rex_mem_reg_plus_const(mc, (segment, reg, offset), _): + mc.write_segment_prefix(segment) if reg >= 8: return REX_B return 0 @@ -220,9 +229,8 @@ # ____________________________________________________________ # Emit a mod/rm referencing an array memory location [reg1+reg2*scale+offset] -def encode_mem_reg_plus_scaled_reg_plus_const(mc, - (reg1, reg2, scaleshift, offset), - _, orbyte): +def encode_mem_reg_plus_scaled_reg_plus_const( + mc, (segment, reg1, reg2, scaleshift, offset), _, orbyte): # emit "reg1 + (reg2 << scaleshift) + offset" assert reg1 != R.ebp and reg2 != R.esp assert 0 <= scaleshift < 4 @@ -262,9 +270,9 @@ mc.writeimm32(offset) return 0 -def rex_mem_reg_plus_scaled_reg_plus_const(mc, - (reg1, reg2, scaleshift, offset), - _): +def rex_mem_reg_plus_scaled_reg_plus_const( + mc, (segment, reg1, reg2, scaleshift, offset), _): + mc.write_segment_prefix(segment) rex = 0 if reg1 >= 8: rex |= REX_B if reg2 >= 8: rex |= REX_X @@ -280,7 +288,7 @@ # with immediate(argnum)). 
@specialize.arg(2) -def encode_abs(mc, immediate, _, orbyte): +def encode_abs(mc, (segment, immediate), _, orbyte): # expands to either '\x05' on 32-bit, or '\x04\x25' on 64-bit if mc.WORD == 8: mc.writechar(chr(0x04 | orbyte)) @@ -291,8 +299,12 @@ mc.writeimm32(immediate) return 0 +def rex_abs(mc, (segment, immediate), _): + mc.write_segment_prefix(segment) + return 0 + def abs_(argnum): - return encode_abs, argnum, None, None + return encode_abs, argnum, None, rex_abs # ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes @@ -330,7 +342,7 @@ def insn(*encoding): def encode(mc, *args): rexbyte = 0 - if mc.WORD == 8: + if 1: #mc.WORD == 8: always needed for the SEGMENT_xx prefix # compute the REX byte, if any for encode_step, arg, extra, rex_step in encoding_steps: if rex_step: @@ -465,6 +477,10 @@ self.writechar(chr((imm >> 16) & 0xFF)) self.writechar(chr((imm >> 24) & 0xFF)) + def write_segment_prefix(self, segment): + if segment != SEGMENT_NO: + self.writechar(segment) + # ------------------------------ MOV ------------------------------ MOV_ri = insn(register(1), '\xB8', immediate(2)) diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -33,51 +33,69 @@ def test_mov_br(): s = CodeBuilder32() - s.MOV_br(-36, edx) + s.MOV_br((SEGMENT_NO, -36), edx) assert s.getvalue() == '\x89\x55\xDC' +def test_mov_br_segment(): + s = CodeBuilder32() + s.MOV_br((SEGMENT_FS, -36), edx) + assert s.getvalue() == '\x64\x89\x55\xDC' + def test_mov_rb(): s = CodeBuilder32() - s.MOV_rb(edx, -36) + s.MOV_rb(edx, (SEGMENT_NO, -36)) assert s.getvalue() == '\x8B\x55\xDC' def test_mov_rm(): s = CodeBuilder32() - s.MOV_rm(edx, (edi, 0)) - s.MOV_rm(edx, (edi, -128)) - s.MOV_rm(edx, (edi, 128)) + s.MOV_rm(edx, (SEGMENT_NO, edi, 0)) + s.MOV_rm(edx, (SEGMENT_NO, edi, -128)) + s.MOV_rm(edx, (SEGMENT_NO, edi, 128)) assert s.getvalue() == '\x8B\x17\x8B\x57\x80\x8B\x97\x80\x00\x00\x00' +def test_mov_rm_segment(): + s = CodeBuilder32() + s.MOV_rm(edx, (SEGMENT_FS, edi, 0)) + s.MOV_rm(edx, (SEGMENT_GS, edi, -128)) + assert s.getvalue() == '\x64\x8B\x17\x65\x8B\x57\x80' + def test_mov_mr(): s = CodeBuilder32() - s.MOV_mr((edi, 0), edx) - s.MOV_mr((edi, -128), edx) - s.MOV_mr((edi, 128), edx) + s.MOV_mr((SEGMENT_NO, edi, 0), edx) + s.MOV_mr((SEGMENT_NO, edi, -128), edx) + s.MOV_mr((SEGMENT_NO, edi, 128), edx) assert s.getvalue() == '\x89\x17\x89\x57\x80\x89\x97\x80\x00\x00\x00' def test_mov_ra(): s = CodeBuilder32() - s.MOV_ra(edx, (esi, edi, 2, 0)) - s.MOV_ra(edx, (esi, edi, 2, -128)) - s.MOV_ra(edx, (esi, edi, 2, 128)) + s.MOV_ra(edx, (SEGMENT_NO, esi, edi, 2, 0)) + s.MOV_ra(edx, (SEGMENT_NO, esi, edi, 2, -128)) + s.MOV_ra(edx, (SEGMENT_NO, esi, edi, 2, 128)) assert s.getvalue() == ('\x8B\x14\xBE' + '\x8B\x54\xBE\x80' + '\x8B\x94\xBE\x80\x00\x00\x00') +def test_mov_ra_segment(): + s = CodeBuilder32() + s.MOV_ra(edx, (SEGMENT_GS, esi, edi, 2, 0)) + s.MOV_ra(edx, (SEGMENT_FS, esi, edi, 2, -128)) + assert s.getvalue() == ('\x65\x8B\x14\xBE' + + '\x64\x8B\x54\xBE\x80') + def test_mov_ra_no_base(): s = CodeBuilder32() - s.MOV_ra(edx, (NO_BASE_REGISTER, edi, 2, 0)) + s.MOV_ra(edx, (SEGMENT_NO, NO_BASE_REGISTER, edi, 2, 0)) assert s.getvalue() == '\x8B\x14\xBD\x00\x00\x00\x00' s = CodeBuilder32() - s.MOV_ra(edx, (NO_BASE_REGISTER, edi, 2, 0xCD)) + s.MOV_ra(edx, (SEGMENT_NO, NO_BASE_REGISTER, edi, 2, 0xCD)) assert 
s.getvalue() == '\x8B\x14\xBD\xCD\x00\x00\x00' def test_mov_ar(): s = CodeBuilder32() - s.MOV_ar((esi, edi, 2, 0), edx) - s.MOV_ar((esi, edi, 2, -128), edx) - s.MOV_ar((esi, edi, 2, 128), edx) + s.MOV_ar((SEGMENT_NO, esi, edi, 2, 0), edx) + s.MOV_ar((SEGMENT_NO, esi, edi, 2, -128), edx) + s.MOV_ar((SEGMENT_NO, esi, edi, 2, 128), edx) assert s.getvalue() == ('\x89\x14\xBE' + '\x89\x54\xBE\x80' + '\x89\x94\xBE\x80\x00\x00\x00') @@ -90,12 +108,12 @@ def test_lea_rb(): s = CodeBuilder32() - s.LEA_rb(ecx, -36) + s.LEA_rb(ecx, (SEGMENT_NO, -36)) assert s.getvalue() == '\x8D\x4D\xDC' def test_lea32_rb(): s = CodeBuilder32() - s.LEA32_rb(ecx, -36) + s.LEA32_rb(ecx, (SEGMENT_NO, -36)) assert s.getvalue() == '\x8D\x8D\xDC\xFF\xFF\xFF' def test_call_l(s=None): @@ -120,17 +138,22 @@ def test_movsd_rj(): s = CodeBuilder32() - s.MOVSD_xj(xmm2, 0x01234567) + s.MOVSD_xj(xmm2, (SEGMENT_NO, 0x01234567)) assert s.getvalue() == '\xF2\x0F\x10\x15\x67\x45\x23\x01' +def test_movsd_rj_segment(): + s = CodeBuilder32() + s.MOVSD_xj(xmm2, (SEGMENT_GS, 0x01234567)) + assert s.getvalue() == '\x65\xF2\x0F\x10\x15\x67\x45\x23\x01' + def test_movzx8_rm(): s = CodeBuilder32() - s.MOVZX8_rm(ecx, (eax, 16)) + s.MOVZX8_rm(ecx, (SEGMENT_NO, eax, 16)) assert s.getvalue() == '\x0F\xB6\x48\x10' def test_movzx16_rm(): s = CodeBuilder32() - s.MOVZX16_rm(ecx, (eax, 16)) + s.MOVZX16_rm(ecx, (SEGMENT_NO, eax, 16)) assert s.getvalue() == '\x0F\xB7\x48\x10' def test_div(): @@ -169,17 +192,19 @@ assert_encodes_as(CodeBuilder32, 'OR8_rr', (bl, bh), '\x08\xFB') def test_test8_mi(): - assert_encodes_as(CodeBuilder32, 'TEST8_mi', ((edx, 16), 99), + assert_encodes_as(CodeBuilder32, 'TEST8_mi', ((SEGMENT_NO, edx, 16), 99), '\xF6\x42\x10\x63') def test_test8_ji(): - assert_encodes_as(CodeBuilder32, 'TEST8_ji', (0x12345678, 99), + assert_encodes_as(CodeBuilder32, 'TEST8_ji', ((SEGMENT_NO,0x12345678), 99), '\xF6\x05\x78\x56\x34\x12\x63') def test_mov8(): cb = CodeBuilder32 - assert_encodes_as(cb, 'MOV8_mi', ((edx, 16), 99), '\xC6\x42\x10\x63') - assert_encodes_as(cb, 'MOV8_ai', ((ebx, ecx, 2, 16), 99), '\xC6\x44\x8B\x10\x63') + assert_encodes_as(cb, 'MOV8_mi', ((SEGMENT_NO, edx, 16), 99), + '\xC6\x42\x10\x63') + assert_encodes_as(cb, 'MOV8_ai', ((SEGMENT_NO, ebx, ecx, 2, 16), 99), + '\xC6\x44\x8B\x10\x63') def test_push32(): cb = CodeBuilder32 @@ -188,9 +213,9 @@ def test_sub_ji8(): cb = CodeBuilder32 - assert_encodes_as(cb, 'SUB_ji8', (11223344, 55), - '\x83\x2D\x30\x41\xAB\x00\x37') - assert_encodes_as(cb, 'SUB_mi8', ((edx, 16), 55), + assert_encodes_as(cb, 'SUB_ji8', ((SEGMENT_FS, 11223344), 55), + '\x64\x83\x2D\x30\x41\xAB\x00\x37') + assert_encodes_as(cb, 'SUB_mi8', ((SEGMENT_NO, edx, 16), 55), '\x83\x6A\x10\x37') class CodeBuilder64(CodeBuilderMixin, X86_64_CodeBuilder): @@ -215,17 +240,17 @@ def test_mov_rm_64(): s = CodeBuilder64() - s.MOV_rm(edx, (edi, 0)) - s.MOV_rm(edx, (r12, 0)) - s.MOV_rm(edx, (r13, 0)) + s.MOV_rm(edx, (SEGMENT_NO, edi, 0)) + s.MOV_rm(edx, (SEGMENT_NO, r12, 0)) + s.MOV_rm(edx, (SEGMENT_NO, r13, 0)) assert s.getvalue() == '\x48\x8B\x17\x49\x8b\x14\x24\x49\x8b\x55\x00' def test_mov_rm_negative_64(): s = CodeBuilder64() - s.MOV_rm(edx, (edi, -1)) + s.MOV_rm(edx, (SEGMENT_NO, edi, -1)) assert s.getvalue() == '\x48\x8B\x57\xFF' def test_movsd_xj_64(): s = CodeBuilder64() - s.MOVSD_xj(xmm2, 0x01234567) + s.MOVSD_xj(xmm2, (SEGMENT_NO, 0x01234567)) assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' From noreply at buildbot.pypy.org Sun Mar 23 14:33:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 
Mar 2014 14:33:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Adapt the auto-encoding tests to generate %fs and %gs as well Message-ID: <20140323133352.D67FB1C054C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70197:3ffbce87ab7b Date: 2014-03-23 14:33 +0100 http://bitbucket.org/pypy/pypy/changeset/3ffbce87ab7b/ Log: Adapt the auto-encoding tests to generate %fs and %gs as well diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -254,3 +254,10 @@ s = CodeBuilder64() s.MOVSD_xj(xmm2, (SEGMENT_NO, 0x01234567)) assert s.getvalue() == '\xF2\x0F\x10\x14\x25\x67\x45\x23\x01' + +def test_mov8_jr(): + py.test.skip("currently this is a rare case that generates an extra " + "rex prefix \x40, which doesn't hurt") + s = CodeBuilder64() + s.MOV8_jr((SEGMENT_GS, 51), ebx | BYTE_REG_FLAG) + assert s.getvalue() == '\x65\x88\x1C\x25\x33\x00\x00\x00' diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -22,7 +22,10 @@ def writechar(self, char): if char != self.expected[self.index:self.index+1]: if (char == self.accept_unnecessary_prefix - and self.index == self.instrindex): + and (self.index == self.instrindex or + (self.index == self.instrindex+1 and + self.expected[self.instrindex] in (rx86.SEGMENT_FS, + rx86.SEGMENT_GS)))): return # ignore the extra character '\x40' print self.op print "\x09from rx86.py:", hexdump(self.expected[self.instrindex:self.index] + char)+"..." @@ -63,6 +66,14 @@ rx86.R.esi, rx86.R.edi] accept_unnecessary_prefix = None methname = '?' 
+ prevseg = rx86.SEGMENT_NO + SEGMAP = {rx86.SEGMENT_NO: rx86.SEGMENT_FS, + rx86.SEGMENT_FS: rx86.SEGMENT_GS, + rx86.SEGMENT_GS: rx86.SEGMENT_NO} + + def getseg(self): + self.prevseg = seg = self.SEGMAP[self.prevseg] + return seg def reg_tests(self): return self.REGS @@ -73,28 +84,31 @@ def xmm_reg_tests(self): return self.reg_tests() - def stack_bp_tests(self, count=COUNT1): + def _all_numbers(self, count): return ([0, 4, -4, 124, 128, -128, -132] + [random.randrange(-0x20000000, 0x20000000) * 4 for i in range(count)]) - def stack_sp_tests(self, count=COUNT1): + def stack_bp_tests(self): + return [(self.getseg(), x) for x in self._all_numbers(COUNT1)] + + def stack_sp_tests(self): return ([0, 4, 124, 128] + [random.randrange(0, 0x20000000) * 4 - for i in range(count)]) + for i in range(COUNT1)]) def memory_tests(self): - return [(reg, ofs) + return [(self.getseg(), reg, ofs) for reg in self.NONSPECREGS - for ofs in self.stack_bp_tests(5) + for ofs in self._all_numbers(5) ] def array_tests(self): - return [(reg1, reg2, scaleshift, ofs) + return [(self.getseg(), reg1, reg2, scaleshift, ofs) for reg1 in self.NONSPECREGS for reg2 in self.NONSPECREGS for scaleshift in [0, 1, 2, 3] - for ofs in self.stack_bp_tests(1) + for ofs in self._all_numbers(1) ] def imm8_tests(self): @@ -109,6 +123,9 @@ [random.randrange(128, 256) for i in range(COUNT1)]) return self.imm8_tests() + v + def addr_tests(self): + return [(self.getseg(), x) for x in self.imm32_tests()] + def relative_tests(self): py.test.skip("explicit test required for %r" % (self.methname,)) @@ -123,10 +140,15 @@ 'a': self.array_tests, 'i': self.imm32_tests, 'i8': self.imm8_tests, - 'j': self.imm32_tests, + 'j': self.addr_tests, 'l': self.relative_tests, } + def assembler_segment(self, segment): + return {rx86.SEGMENT_NO: '', + rx86.SEGMENT_FS: '%fs:', + rx86.SEGMENT_GS: '%gs:'}[segment] + def assembler_operand_reg(self, regnum): return self.REGNAMES[regnum] @@ -137,26 +159,30 @@ def assembler_operand_xmm_reg(self, regnum): return self.XMMREGNAMES[regnum] - def assembler_operand_stack_bp(self, position): - return '%d(%s)' % (position, self.REGNAMES[5]) + def assembler_operand_stack_bp(self, (seg, position)): + return '%s%d(%s)' % (self.assembler_segment(seg), + position, self.REGNAMES[5]) def assembler_operand_stack_sp(self, position): return '%d(%s)' % (position, self.REGNAMES[4]) - def assembler_operand_memory(self, (reg1, offset)): + def assembler_operand_memory(self, (seg, reg1, offset)): if not offset: offset = '' - return '%s(%s)' % (offset, self.REGNAMES[reg1]) + return '%s%s(%s)' % (self.assembler_segment(seg), + offset, self.REGNAMES[reg1]) - def assembler_operand_array(self, (reg1, reg2, scaleshift, offset)): + def assembler_operand_array(self, (seg, reg1, reg2, scaleshift, offset)): + assert isinstance(offset, int) if not offset: offset = '' - return '%s(%s,%s,%d)' % (offset, self.REGNAMES[reg1], - self.REGNAMES[reg2], 1< Author: Armin Rigo Branch: stmgc-c7 Changeset: r70198:85dc2a0ebc48 Date: 2014-03-23 14:50 +0100 http://bitbucket.org/pypy/pypy/changeset/85dc2a0ebc48/ Log: Fix regloc, as far as test_regloc says. 
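The test helpers above spell out the operand convention these patches introduce: every memory operand now carries its segment first, and the expected GNU-as text simply gains a '%fs:' or '%gs:' prefix. A standalone sketch of that rendering, assuming nothing beyond what the diff shows (the function and the register-name table are invented for illustration):

SEGPREFIX = {'\x00': '', '\x64': '%fs:', '\x65': '%gs:'}
REGNAMES = ['%eax', '%ecx', '%edx', '%ebx', '%esp', '%ebp', '%esi', '%edi']

def format_array_operand((seg, reg1, reg2, scaleshift, offset)):
    # segment prefix, displacement, then (base, index, scale) in AT&T syntax
    ofs = offset or ''
    return '%s%s(%s,%s,%d)' % (SEGPREFIX[seg], ofs,
                               REGNAMES[reg1], REGNAMES[reg2], 1 << scaleshift)

print format_array_operand(('\x64', 2, 6, 2, 8))   # prints %fs:8(%edx,%esi,4)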
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2671,8 +2671,8 @@ def raw_stack(offset, type=INT): return RawEbpLoc(offset, type) -def heap(addr): - return AddressLoc(ImmedLoc(addr), imm0, 0, 0) +def heap(segment, addr): + return AddressLoc(segment, ImmedLoc(addr), imm0, 0, 0) def not_implemented(msg): msg = '[x86/asm] %s\n' % msg diff --git a/rpython/jit/backend/x86/codebuf.py b/rpython/jit/backend/x86/codebuf.py --- a/rpython/jit/backend/x86/codebuf.py +++ b/rpython/jit/backend/x86/codebuf.py @@ -22,8 +22,7 @@ class MachineCodeBlockWrapper(BlockBuilderMixin, LocationCodeBuilder, codebuilder_cls): - def __init__(self, cpu): - self.stm = cpu.gc_ll_descr.stm + def __init__(self): self.init_block_builder() # a list of relative positions; for each position p, the bytes # at [p-4:p] encode an absolute address that will need to be @@ -54,47 +53,3 @@ adr[0] = intmask(adr[0] - p) valgrind.discard_translations(addr, self.get_relative_pos()) self._dump(addr, "jit-backend-dump", backend_name) - - def in_tl_segment(self, adr): - """Makes 'adr' relative to threadlocal-base if we run in STM. - Before using such a relative address, call SEGTL().""" - if self.stm and we_are_translated(): - # only for STM and not during tests - from rpython.jit.backend.x86 import stmtlocal, rx86 - result = adr - stmtlocal.threadlocal_base() - assert rx86.fits_in_32bits(result) - return result - return adr - - def SEGTL(self): - """Insert segment prefix for thread-local memory if we run - in STM and not during testing. This is used to access thread-local - data structures like the struct stm_thread_local_s.""" - if self.stm and we_are_translated(): - from rpython.jit.backend.x86 import stmtlocal - stmtlocal.tl_segment_prefix(self) - - def SEGC7(self): - """Insert segment prefix for the stmgc-c7 segment of memory - if we run in STM and not during testing. 
This is used to access - any GC object, or things in the STM_SEGMENT structure.""" - if self.stm and we_are_translated(): - from rpython.jit.backend.x86 import stmtlocal - stmtlocal.c7_segment_prefix(self) - - def SEGC7_if_gc(self, op): - if self.stm and we_are_translated(): - from rpython.jit.backend.x86 import stmtlocal - from rpython.jit.metainterp.resoperation import rop - # - opnum = op.getopnum() - if opnum in (rop.GETFIELD_GC, - rop.GETFIELD_GC_PURE, - rop.GETARRAYITEM_GC, - rop.GETARRAYITEM_GC_PURE, - rop.GETINTERIORFIELD_GC, - rop.SETFIELD_GC, - rop.SETARRAYITEM_GC, - rop.SETINTERIORFIELD_GC, - ): - stmtlocal.c7_segment_prefix(self) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -3,7 +3,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64 from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib.objectmodel import specialize, instantiate +from rpython.rlib.objectmodel import specialize, instantiate, we_are_translated from rpython.rlib.rarithmetic import intmask from rpython.jit.metainterp.history import FLOAT, INT from rpython.jit.codewriter import longlong @@ -34,7 +34,7 @@ def value_r(self): return self.value def value_b(self): return self.value def value_s(self): return self.value - def value_j(self): return self.value + def value_j(self): raise AssertionError("value_j undefined") def value_i(self): return self.value def value_x(self): return self.value def value_a(self): raise AssertionError("value_a undefined") @@ -206,7 +206,10 @@ _immutable_ = True # The address is base_loc + (scaled_loc << scale) + static_offset - def __init__(self, base_loc, scaled_loc, scale=0, static_offset=0): + def __init__(self, segment,base_loc, scaled_loc, scale=0, static_offset=0): + if not we_are_translated(): + assert segment in (rx86.SEGMENT_NO, rx86.SEGMENT_FS, + rx86.SEGMENT_GS) assert 0 <= scale < 4 assert isinstance(base_loc, ImmedLoc) or isinstance(base_loc, RegLoc) assert isinstance(scaled_loc, ImmedLoc) or isinstance(scaled_loc, RegLoc) @@ -214,18 +217,22 @@ if isinstance(base_loc, ImmedLoc): if isinstance(scaled_loc, ImmedLoc): self._location_code = 'j' - self.value = base_loc.value + (scaled_loc.value << scale) + static_offset + self.loc_j = (segment, base_loc.value + + (scaled_loc.value << scale) + static_offset) else: self._location_code = 'a' - self.loc_a = (rx86.NO_BASE_REGISTER, scaled_loc.value, scale, base_loc.value + static_offset) + self.loc_a = (segment, rx86.NO_BASE_REGISTER, scaled_loc.value, + scale, base_loc.value + static_offset) else: if isinstance(scaled_loc, ImmedLoc): # FIXME: What if base_loc is ebp or esp? 
self._location_code = 'm' - self.loc_m = (base_loc.value, (scaled_loc.value << scale) + static_offset) + self.loc_m = (segment, base_loc.value, + (scaled_loc.value << scale) + static_offset) else: self._location_code = 'a' - self.loc_a = (base_loc.value, scaled_loc.value, scale, static_offset) + self.loc_a = (segment, base_loc.value, scaled_loc.value, + scale, static_offset) def __repr__(self): dict = {'j': 'value', 'a': 'loc_a', 'm': 'loc_m', 'a':'loc_a'} @@ -240,6 +247,9 @@ return False # not 100% true, but we don't use AddressLoc for locations # really, so it's ok + def value_j(self): + return self.loc_j + def value_a(self): return self.loc_a @@ -248,15 +258,15 @@ def find_unused_reg(self): if self._location_code == 'm': - if self.loc_m[0] == eax.value: + if self.loc_m[1] == eax.value: return edx elif self._location_code == 'a': - if self.loc_a[0] == eax.value: - if self.loc_a[1] == edx.value: + if self.loc_a[1] == eax.value: + if self.loc_a[2] == edx.value: return ecx return edx - if self.loc_a[1] == eax.value: - if self.loc_a[0] == edx.value: + if self.loc_a[2] == eax.value: + if self.loc_a[1] == edx.value: return ecx return edx return eax @@ -265,11 +275,11 @@ result = instantiate(AddressLoc) result._location_code = self._location_code if self._location_code == 'm': - result.loc_m = (self.loc_m[0], self.loc_m[1] + ofs) + result.loc_m = self.loc_m[:2] + (self.loc_m[2] + ofs) elif self._location_code == 'a': - result.loc_a = self.loc_a[:3] + (self.loc_a[3] + ofs,) + result.loc_a = self.loc_a[:4] + (self.loc_a[4] + ofs,) elif self._location_code == 'j': - result.value = self.value + ofs + result.loc_j = (self.loc_j[0], self.loc_j[1] + ofs) else: raise AssertionError(self._location_code) return result @@ -383,9 +393,9 @@ if code1 == 'j': checkvalue = loc1.value_j() elif code1 == 'm': - checkvalue = loc1.value_m()[1] + checkvalue = loc1.value_m()[2] elif code1 == 'a': - checkvalue = loc1.value_a()[3] + checkvalue = loc1.value_a()[4] else: checkvalue = 0 if not rx86.fits_in_32bits(checkvalue): @@ -468,13 +478,13 @@ val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) return - if possible_code1 == 'm' and not fits32(val1[1]): + if possible_code1 == 'm' and not fits32(val1[2]): val1 = self._fix_static_offset_64_m(val1) - if possible_code2 == 'm' and not fits32(val2[1]): + if possible_code2 == 'm' and not fits32(val2[2]): val2 = self._fix_static_offset_64_m(val2) - if possible_code1 == 'a' and not fits32(val1[3]): + if possible_code1 == 'a' and not fits32(val1[4]): val1 = self._fix_static_offset_64_a(val1) - if possible_code2 == 'a' and not fits32(val2[3]): + if possible_code2 == 'a' and not fits32(val2[4]): val2 = self._fix_static_offset_64_a(val2) invoke(self, possible_code1 + possible_code2, val1, val2) return @@ -498,9 +508,9 @@ val = self._addr_as_reg_offset(val) _rx86_getattr(self, name + "_m")(val) return - if possible_code == 'm' and not fits32(val[1]): + if possible_code == 'm' and not fits32(val[2]): val = self._fix_static_offset_64_m(val) - if possible_code == 'a' and not fits32(val[3]): + if possible_code == 'a' and not fits32(val[4]): val = self._fix_static_offset_64_a(val) methname = name + "_" + possible_code _rx86_getattr(self, methname)(val) @@ -531,7 +541,7 @@ return func_with_new_name(INSN, "INSN_" + name) - def _addr_as_reg_offset(self, addr): + def _addr_as_reg_offset(self, (segment, addr)): # Encodes a (64-bit) address as an offset from the scratch register. 
# If we are within a "reuse_scratch_register" block, we remember the # last value we loaded to the scratch register and encode the address @@ -539,7 +549,7 @@ if self._scratch_register_known: offset = addr - self._scratch_register_value if rx86.fits_in_32bits(offset): - return (X86_64_SCRATCH_REG.value, offset) + return (segment, X86_64_SCRATCH_REG.value, offset) # else: fall through if self._reuse_scratch_register: @@ -547,9 +557,9 @@ self._scratch_register_value = addr self.MOV_ri(X86_64_SCRATCH_REG.value, addr) - return (X86_64_SCRATCH_REG.value, 0) + return (segment, X86_64_SCRATCH_REG.value, 0) - def _fix_static_offset_64_m(self, (basereg, static_offset)): + def _fix_static_offset_64_m(self, (segment, basereg, static_offset)): # For cases where an AddressLoc has the location_code 'm', but # where the static offset does not fit in 32-bits. We have to fall # back to the X86_64_SCRATCH_REG. Returns a new location encoded @@ -558,10 +568,10 @@ self._scratch_register_known = False self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset) self.LEA_ra(X86_64_SCRATCH_REG.value, - (basereg, X86_64_SCRATCH_REG.value, 0, 0)) - return (X86_64_SCRATCH_REG.value, 0) + (rx86.SEGMENT_NO, basereg, X86_64_SCRATCH_REG.value, 0, 0)) + return (segment, X86_64_SCRATCH_REG.value, 0) - def _fix_static_offset_64_a(self, (basereg, scalereg, + def _fix_static_offset_64_a(self, (segment, basereg, scalereg, scale, static_offset)): # For cases where an AddressLoc has the location_code 'a', but # where the static offset does not fit in 32-bits. We have to fall @@ -573,8 +583,8 @@ # if basereg != rx86.NO_BASE_REGISTER: self.LEA_ra(X86_64_SCRATCH_REG.value, - (basereg, X86_64_SCRATCH_REG.value, 0, 0)) - return (X86_64_SCRATCH_REG.value, scalereg, scale, 0) + (rx86.SEGMENT_NO, basereg, X86_64_SCRATCH_REG.value, 0, 0)) + return (segment, X86_64_SCRATCH_REG.value, scalereg, scale, 0) def _load_scratch(self, value): if (self._scratch_register_known diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py --- a/rpython/jit/backend/x86/test/test_regloc.py +++ b/rpython/jit/backend/x86/test/test_regloc.py @@ -1,5 +1,6 @@ import struct, sys from rpython.jit.backend.x86.rx86 import R +from rpython.jit.backend.x86.rx86 import SEGMENT_NO, SEGMENT_FS from rpython.jit.backend.x86.regloc import * from rpython.jit.backend.x86.test.test_rx86 import CodeBuilder32, CodeBuilder64, assert_encodes_as from rpython.jit.backend.x86.assembler import heap @@ -24,7 +25,7 @@ # 32-bit assert_encodes_as(cb32, "MOV16", (ecx, ebx), '\x66\x89\xD9') assert_encodes_as(cb32, "MOV16", - (AddressLoc(ecx, ImmedLoc(16), 0, 0), ebx), + (AddressLoc(SEGMENT_NO, ecx, ImmedLoc(16), 0, 0), ebx), '\x66\x89\x59\x10') # 64-bit assert_encodes_as(cb64, "MOV16", (r8, ebx), '\x66\x41\x89\xD8') # 11 011 000 @@ -43,21 +44,25 @@ expected = '\x66\x41\xC7\xC1\xC7\xCF' # could be '\x66\x41\xB9\xC7\xCF' assert_encodes_as(cb64, "MOV16", (r9, ImmedLoc(-12345)), expected) assert_encodes_as(cb64, "MOV16", - (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), + (AddressLoc(SEGMENT_NO, r13, ImmedLoc(0), 0, 0), + ImmedLoc(12345)), '\x66\x41\xC7\x45\x00\x39\x30') def test_cmp_16(): # only 'CMP16_mi' is supported # 32-bit assert_encodes_as(cb32, "CMP16", - (AddressLoc(ecx, ImmedLoc(0), 0, 0), ImmedLoc(21324)), + (AddressLoc(SEGMENT_NO, ecx, ImmedLoc(0), 0, 0), + ImmedLoc(21324)), '\x66\x81\x39\x4c\x53') assert_encodes_as(cb32, "CMP16", - (AddressLoc(esi, ImmedLoc(2), 0, 0), ImmedLoc(-12345)), - '\x66\x81\x7e\x02\xc7\xcf') + 
(AddressLoc(SEGMENT_FS, esi, ImmedLoc(2), 0, 0), + ImmedLoc(-12345)), + '\x64\x66\x81\x7e\x02\xc7\xcf') # 64-bit assert_encodes_as(cb64, "CMP16", - (AddressLoc(r13, ImmedLoc(0), 0, 0), ImmedLoc(12345)), + (AddressLoc(SEGMENT_NO, r13, ImmedLoc(0), 0, 0), + ImmedLoc(12345)), '\x66\x41\x81\x7D\x00\x39\x30') def test_relocation(): @@ -107,8 +112,8 @@ base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() cb.begin_reuse_scratch_register() - cb.MOV(ecx, heap(base_addr)) - cb.MOV(ecx, heap(base_addr + 8)) + cb.MOV(ecx, heap(SEGMENT_NO, base_addr)) + cb.MOV(ecx, heap(SEGMENT_FS, base_addr + 8)) cb.end_reuse_scratch_register() expected_instructions = ( @@ -116,8 +121,8 @@ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + # mov rcx, [r11] '\x49\x8B\x0B' + - # mov rcx, [r11+8] - '\x49\x8B\x4B\x08' + # mov rcx, %fs:[r11+8] + '\x64\x49\x8B\x4B\x08' ) assert cb.getvalue() == expected_instructions @@ -126,35 +131,36 @@ def test_64bit_address_1(self): base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.CMP(ecx, AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr)) + cb.CMP(ecx, AddressLoc(SEGMENT_FS, ImmedLoc(0), ImmedLoc(0), + 0, base_addr)) # this case is a CMP_rj # expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # cmp rcx, [r11] - '\x49\x3B\x0B' + # cmp rcx, %fs:[r11] + '\x64\x49\x3B\x0B' ) assert cb.getvalue() == expected_instructions def test_64bit_address_2(self): base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(ecx, AddressLoc(ImmedLoc(0), edx, 3, base_addr)) + cb.MOV(ecx, AddressLoc(SEGMENT_FS, ImmedLoc(0), edx, 3, base_addr)) # this case is a CMP_ra # expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov rcx, [r11+8*rdx] - '\x49\x8B\x0C\xD3' + # mov rcx, %fs:[r11+8*rdx] + '\x64\x49\x8B\x0C\xD3' ) assert cb.getvalue() == expected_instructions def test_64bit_address_3(self): base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(ecx, AddressLoc(edx, ImmedLoc(0), 0, base_addr)) + cb.MOV(ecx, AddressLoc(SEGMENT_FS, edx, ImmedLoc(0), 0, base_addr)) # this case is a CMP_rm # expected_instructions = ( @@ -162,8 +168,8 @@ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' # lea r11, [rdx+r11] '\x4E\x8D\x1C\x1A' - # mov rcx, [r11] - '\x49\x8B\x0B' + # mov rcx, %fs:[r11] + '\x64\x49\x8B\x0B' ) assert cb.getvalue() == expected_instructions @@ -173,7 +179,7 @@ cb.begin_reuse_scratch_register() assert cb._reuse_scratch_register is True assert cb._scratch_register_known is False - cb.MOV(ecx, AddressLoc(edx, esi, 2, base_addr)) + cb.MOV(ecx, AddressLoc(SEGMENT_FS, edx, esi, 2, base_addr)) assert cb._reuse_scratch_register is True assert cb._scratch_register_known is False # this case is a CMP_ra @@ -183,8 +189,8 @@ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' # lea r11, [rdx+r11] '\x4E\x8D\x1C\x1A' - # mov rcx, [r11+4*rsi] - '\x49\x8B\x0C\xB3' + # mov rcx, %fs:[r11+4*rsi] + '\x64\x49\x8B\x0C\xB3' ) assert cb.getvalue() == expected_instructions @@ -204,13 +210,13 @@ def test_MOV_64bit_address_into_r11(self): base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(r11, heap(base_addr)) + cb.MOV(r11, heap(SEGMENT_FS, base_addr)) expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' + - # mov r11, [r11] - '\x4D\x8B\x1B' + # mov r11, %fs:[r11] + '\x64\x4D\x8B\x1B' ) assert cb.getvalue() == expected_instructions @@ -218,15 +224,15 @@ immed = -0x01234567 base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - 
cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr), + cb.MOV(AddressLoc(SEGMENT_FS, ImmedLoc(0), ImmedLoc(0), 0, base_addr), ImmedLoc(immed)) # this case is a MOV_ji # expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [r11], -0x01234567 - '\x49\xC7\x03\x99\xBA\xDC\xFE' + # mov %fs:[r11], -0x01234567 + '\x64\x49\xC7\x03\x99\xBA\xDC\xFE' ) assert cb.getvalue() == expected_instructions @@ -234,15 +240,15 @@ immed = -0x01234567 base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr), + cb.MOV(AddressLoc(SEGMENT_FS, ImmedLoc(0), edx, 3, base_addr), ImmedLoc(immed)) # this case is a MOV_ai # expected_instructions = ( # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [r11+8*rdx], -0x01234567 - '\x49\xC7\x04\xD3\x99\xBA\xDC\xFE' + # mov %fs:[r11+8*rdx], -0x01234567 + '\x64\x49\xC7\x04\xD3\x99\xBA\xDC\xFE' ) assert cb.getvalue() == expected_instructions @@ -250,7 +256,7 @@ immed = -0x01234567 base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(AddressLoc(edx, ImmedLoc(0), 0, base_addr), + cb.MOV(AddressLoc(SEGMENT_FS, edx, ImmedLoc(0), 0, base_addr), ImmedLoc(immed)) # this case is a MOV_mi # @@ -259,8 +265,8 @@ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' # lea r11, [rdx+r11] '\x4E\x8D\x1C\x1A' - # mov [r11], -0x01234567 - '\x49\xC7\x03\x99\xBA\xDC\xFE' + # mov %fs:[r11], -0x01234567 + '\x64\x49\xC7\x03\x99\xBA\xDC\xFE' ) assert cb.getvalue() == expected_instructions @@ -268,7 +274,7 @@ immed = -0x01234567 base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(AddressLoc(edx, esi, 2, base_addr), ImmedLoc(immed)) + cb.MOV(AddressLoc(SEGMENT_FS, edx, esi, 2, base_addr), ImmedLoc(immed)) # this case is a MOV_ai # expected_instructions = ( @@ -276,8 +282,8 @@ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' # lea r11, [rdx+r11] '\x4E\x8D\x1C\x1A' - # mov [r11+4*rsi], -0x01234567 - '\x49\xC7\x04\xB3\x99\xBA\xDC\xFE' + # mov %fs:[r11+4*rsi], -0x01234567 + '\x64\x49\xC7\x04\xB3\x99\xBA\xDC\xFE' ) assert cb.getvalue() == expected_instructions @@ -287,7 +293,7 @@ immed = 0x0123456789ABCDEF base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr), + cb.MOV(AddressLoc(SEGMENT_FS, ImmedLoc(0), ImmedLoc(0), 0, base_addr), ImmedLoc(immed)) # this case is a MOV_ji # @@ -298,8 +304,8 @@ '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01' # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [r11], rax - '\x49\x89\x03' + # mov %fs:[r11], rax + '\x64\x49\x89\x03' # pop rax '\x58' ) @@ -309,7 +315,7 @@ immed = 0x0123456789ABCDEF base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr), + cb.MOV(AddressLoc(SEGMENT_FS, ImmedLoc(0), edx, 3, base_addr), ImmedLoc(immed)) # this case is a MOV_ai # @@ -320,8 +326,8 @@ '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01' # mov r11, 0xFEDCBA9876543210 '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' - # mov [r11+8*rdx], rax - '\x49\x89\x04\xD3' + # mov %fs:[r11+8*rdx], rax + '\x64\x49\x89\x04\xD3' # pop rax '\x58' ) @@ -331,7 +337,7 @@ immed = 0x0123456789ABCDEF base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(AddressLoc(eax, ImmedLoc(0), 0, base_addr), + cb.MOV(AddressLoc(SEGMENT_FS, eax, ImmedLoc(0), 0, base_addr), ImmedLoc(immed)) # this case is a MOV_mi # @@ -344,8 +350,8 @@ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' # lea r11, [rax+r11] '\x4E\x8D\x1C\x18' - # 
mov [r11], rdx - '\x49\x89\x13' + # mov %fs:[r11], rdx + '\x64\x49\x89\x13' # pop rdx '\x5A' ) @@ -355,7 +361,7 @@ immed = 0x0123456789ABCDEF base_addr = 0xFEDCBA9876543210 cb = LocationCodeBuilder64() - cb.MOV(AddressLoc(edx, eax, 2, base_addr), ImmedLoc(immed)) + cb.MOV(AddressLoc(SEGMENT_FS, edx, eax, 2, base_addr), ImmedLoc(immed)) # this case is a MOV_ai # expected_instructions = ( @@ -367,8 +373,8 @@ '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' # lea r11, [rdx+r11] '\x4E\x8D\x1C\x1A' - # mov [r11+4*rax], rcx - '\x49\x89\x0C\x83' + # mov %fs:[r11+4*rax], rcx + '\x64\x49\x89\x0C\x83' # pop rcx '\x59' ) @@ -392,14 +398,14 @@ def test_inc_64bit_address_1(self): base_addr = 0x0123456789ABCDEF cb = LocationCodeBuilder64() - cb.INC(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr)) + cb.INC(AddressLoc(SEGMENT_FS, ImmedLoc(0), ImmedLoc(0), 0, base_addr)) # this case is a INC_j # expected_instructions = ( # mov r11, 0x0123456789ABCDEF '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' - # inc [r11] - '\x49\xFF\x03' + # inc %fs:[r11] + '\x64\x49\xFF\x03' ) assert cb.getvalue() == expected_instructions @@ -407,14 +413,14 @@ py.test.skip("there is no unary instruction INSN_a so far") base_addr = 0x0123456789ABCDEF cb = LocationCodeBuilder64() - cb.INC(AddressLoc(ImmedLoc(0), edx, 3, base_addr)) + cb.INC(AddressLoc(SEGMENT_FS, ImmedLoc(0), edx, 3, base_addr)) # this case would be a INC_a xxx def test_inc_64bit_address_3(self): base_addr = 0x0123456789ABCDEF cb = LocationCodeBuilder64() - cb.INC(AddressLoc(eax, ImmedLoc(0), 0, base_addr)) + cb.INC(AddressLoc(SEGMENT_FS, eax, ImmedLoc(0), 0, base_addr)) # this case is a INC_m # expected_instructions = ( @@ -422,7 +428,7 @@ '\x49\xBB\xEF\xCD\xAB\x89\x67\x45\x23\x01' # lea r11, [rax+r11] '\x4E\x8D\x1C\x18' - # inc [r11] - '\x49\xFF\x03' + # inc %fs:[r11] + '\x64\x49\xFF\x03' ) assert cb.getvalue() == expected_instructions From noreply at buildbot.pypy.org Sun Mar 23 15:08:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 15:08:54 +0100 (CET) Subject: [pypy-commit] pypy default: Point out the textual differences Message-ID: <20140323140854.084EE1C3434@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70199:29c8f43f8625 Date: 2014-03-23 15:08 +0100 http://bitbucket.org/pypy/pypy/changeset/29c8f43f8625/ Log: Point out the textual differences diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -139,7 +139,13 @@ txt1 = str(op1) txt2 = str(op2) while txt1 or txt2: - print '%s| %s' % (txt1[:width].ljust(width), txt2[:width]) + part1 = txt1[:width] + part2 = txt2[:width] + if part1 == part2: + sep = '| ' + else: + sep = '<>' + print '%s%s%s' % (part1.ljust(width), sep, part2) txt1 = txt1[width:] txt2 = txt2[width:] print '-' * totwidth From noreply at buildbot.pypy.org Sun Mar 23 15:15:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 15:15:44 +0100 (CET) Subject: [pypy-commit] pypy default: Test fix (OS/X, Win) Message-ID: <20140323141544.80A1E1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70200:99ea05cf50b8 Date: 2014-03-23 15:15 +0100 http://bitbucket.org/pypy/pypy/changeset/99ea05cf50b8/ Log: Test fix (OS/X, Win) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -545,7 +545,7 @@ p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) - p2 = call_malloc_nursery_varsize(2, 4, i2, \ + p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) setfield_gc(p2, i2, descr=unicodelendescr) p3 = call_malloc_nursery_varsize(1, 1, i2, \ From noreply at buildbot.pypy.org Sun Mar 23 15:21:01 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 23 Mar 2014 15:21:01 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: merge default into branch Message-ID: <20140323142101.A69651C0290@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70201:dec6e4c4c504 Date: 2014-03-23 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/dec6e4c4c504/ Log: merge default into branch diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -528,11 +528,6 @@ """) def test_rewrite_assembler_newstr_newunicode(self): - import sys - if sys.platform == 'win32': - unicode_size = 2 - else: - unicode_size = 4 self.check_rewrite(""" [i2] p0 = newstr(14) @@ -543,21 +538,21 @@ """, """ [i2] p0 = call_malloc_nursery( \ - %%(strdescr.basesize + 16 * strdescr.itemsize + \ + %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) - setfield_gc(p0, %%(strdescr.tid)d, descr=tiddescr) + setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) setfield_gc(p0, 14, descr=strlendescr) - p1 = int_add(p0, %%(strdescr.basesize + 16 * strdescr.itemsize)d) - setfield_gc(p1, %%(unicodedescr.tid)d, descr=tiddescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) + setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) - p2 = call_malloc_nursery_varsize(2, %d, i2, \ + p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) setfield_gc(p2, i2, descr=unicodelendescr) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) setfield_gc(p3, i2, descr=strlendescr) jump() - """ % unicode_size) + """) def test_write_barrier_before_setfield_gc(self): self.check_rewrite(""" diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -108,6 +108,7 @@ raise NotImplementedError def getaddr(self): + "Only for raw addresses (BoxInt & ConstInt), not for GC addresses" raise NotImplementedError def sort_key(self): @@ -321,9 +322,6 @@ else: return 0 - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -494,9 +492,6 @@ return lltype.cast_opaque_ptr(PTR, self.getref_base()) getref._annspecialcase_ = 'specialize:arg(1)' - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def _get_hash_(self): if self.value: return lltype.identityhash(self.value) diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -139,7 +139,13 @@ txt1 = str(op1) txt2 = str(op2) while txt1 or txt2: - print '%s| %s' % (txt1[:width].ljust(width), 
txt2[:width]) + part1 = txt1[:width] + part2 = txt2[:width] + if part1 == part2: + sep = '| ' + else: + sep = '<>' + print '%s%s%s' % (part1.ljust(width), sep, part2) txt1 = txt1[width:] txt2 = txt2[width:] print '-' * totwidth From noreply at buildbot.pypy.org Sun Mar 23 16:10:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:10:07 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes fixes fixes fixes Message-ID: <20140323151007.BB26F1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70202:913db8f92462 Date: 2014-03-23 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/913db8f92462/ Log: fixes fixes fixes fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -59,6 +59,18 @@ self.stack_check_slowpath = 0 self.propagate_exception_path = 0 self.teardown() + # + self.SEGMENT_NO = rx86.SEGMENT_NO + if translate_support_code and cpu.gc_ll_descr.stm: + assert IS_X86_64 + self.SEGMENT_GC = rx86.SEGMENT_GS + self.SEGMENT_TL = rx86.SEGMENT_FS + self.SEGMENT_FRAME = rx86.SEGMENT_GS + # ^^^ same as SEGMENT_GC, but use it for accessing %ebp + else: + self.SEGMENT_GC = rx86.SEGMENT_NO + self.SEGMENT_TL = rx86.SEGMENT_NO + self.SEGMENT_FRAME = rx86.SEGMENT_NO def setup_once(self): BaseAssembler.setup_once(self) @@ -73,7 +85,7 @@ if WORD == 8: self.pending_memoryerror_trampoline_from = [] self.error_trampoline_64 = 0 - self.mc = codebuf.MachineCodeBlockWrapper(self.cpu) + self.mc = codebuf.MachineCodeBlockWrapper() #assert self.datablockwrapper is None --- but obscure case # possible, e.g. getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) @@ -109,15 +121,15 @@ def set_extra_stack_depth(self, mc, value): if self._is_asmgcc(): extra_ofs = self.cpu.get_ofs_of_frame_field('jf_extra_stack_depth') - mc.MOV_bi(extra_ofs, value) + mc.MOV_bi((self.SEGMENT_FRAME, extra_ofs), value) def build_frame_realloc_slowpath(self): - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats) # this is the gcmap stored by push_gcmap(mov=True) in _check_stack_frame mc.MOV_rs(ecx.value, WORD) gcmap_ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.MOV_br(gcmap_ofs, ecx.value) + mc.MOV_br((self.SEGMENT_FRAME, gcmap_ofs), ecx.value) if IS_X86_64: mc.MOV_rs(esi.value, WORD*2) @@ -147,7 +159,7 @@ self._load_shadowstack_top_in_ebx(mc, gcrootmap) mc.MOV_mr((ebx.value, -WORD), eax.value) - mc.MOV_bi(gcmap_ofs, 0) + mc.MOV_bi((self.SEGMENT_FRAME, gcmap_ofs), 0) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats) mc.RET() self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) @@ -156,7 +168,7 @@ """ This builds a general call slowpath, for whatever call happens to come. """ - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() # copy registers to the frame, with the exception of the # 'cond_call_register_arguments' and eax, because these have already # been saved by the caller. Note that this is not symmetrical: @@ -196,11 +208,11 @@ This function does not have to preserve registers. It expects all registers to be saved in the caller. 
""" - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() # store the gc pattern ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.MOV_rs(ecx.value, WORD) - mc.MOV_br(ofs, ecx.value) + mc.MOV_br((self.SEGMENT_FRAME, ofs), ecx.value) # # align on 16b boundary (there is a retaddr on the stack) mc.SUB_ri(esp.value, 16 - WORD) @@ -220,7 +232,7 @@ # mc.ADD_ri(esp.value, 16 - WORD) # clear the gc pattern - mc.MOV_bi(ofs, 0) + mc.MOV_bi((self.SEGMENT_FRAME, ofs), 0) # # Fill the stm resume buffer. Don't do it before the call! # The previous transaction may still be aborted during the call @@ -253,12 +265,12 @@ This function must preserve all registers apart from eax and edi. """ assert kind in ['fixed', 'str', 'unicode', 'var'] - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [eax, edi], self.cpu.supports_floats) # store the gc pattern ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.MOV_rs(ecx.value, WORD) - mc.MOV_br(ofs, ecx.value) + mc.MOV_br((self.SEGMENT_FRAME, ofs), ecx.value) # if kind == 'fixed': addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() @@ -309,10 +321,9 @@ self.set_extra_stack_depth(mc, 0) self._pop_all_regs_from_frame(mc, [eax, edi], self.cpu.supports_floats) nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() - mc.SEGC7() - mc.MOV(edi, heap(nursery_free_adr)) # load this in EDI + mc.MOV(edi, heap(self.SEGMENT_GC, nursery_free_adr)) # load this in EDI # clear the gc pattern - mc.MOV_bi(ofs, 0) + mc.MOV_bi((self.SEGMENT_FRAME, ofs), 0) mc.RET() # # If the slowpath malloc failed, we raise a MemoryError that @@ -331,17 +342,17 @@ if not self.cpu.propagate_exception_descr: return # not supported (for tests, or non-translated) # - self.mc = codebuf.MachineCodeBlockWrapper(self.cpu) + self.mc = codebuf.MachineCodeBlockWrapper() # # read and reset the current exception self._store_and_reset_exception(self.mc, eax) ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') - self.mc.MOV_br(ofs, eax.value) + self.mc.MOV_br((self.SEGMENT_FRAME, ofs), eax.value) propagate_exception_descr = rffi.cast(lltype.Signed, cast_instance_to_gcref(self.cpu.propagate_exception_descr)) ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) + self.mc.MOV(self.raw_stack(ofs), imm(propagate_exception_descr)) # self._call_footer() rawstart = self.mc.materialize(self.cpu.asmmemmgr, []) @@ -363,7 +374,7 @@ # | my own retaddr | <-- esp # +---------------------+ # - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() # if IS_X86_64: # on the x86_64, we have to save all the registers that may @@ -384,9 +395,7 @@ else: mc.ADD_ri(esp.value, WORD) # - ea = mc.in_tl_segment(self.cpu.pos_exception()) - mc.SEGTL() - mc.MOV(eax, heap(ea)) + mc.MOV(eax, self.heap_tl(self.cpu.pos_exception())) mc.TEST_rr(eax.value, eax.value) mc.J_il8(rx86.Conditions['NZ'], 0) jnz_location = mc.get_relative_pos() @@ -423,7 +432,7 @@ # all XMM registers. It takes a single argument just pushed # on the stack even on X86_64. It must restore stack alignment # accordingly. 
- mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() # if not for_frame: self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) @@ -500,7 +509,7 @@ # # make the stm_longjmp_callback() function, with signature # void (*longjmp_callback)(void *stm_resume_buffer) - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() # # 'edi' contains the stm resume buffer, so the new stack # location that we have to enforce is 'edi - FRAME_FIXED_SIZE * WORD'. @@ -511,10 +520,8 @@ # # restore the shadowstack pointer from stm_resume_buffer[1] gcrootmap = self.cpu.gc_ll_descr.gcrootmap - rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE + 1) * WORD) - mc.SEGTL() - mc.MOV_jr(rst, eax.value) + mc.MOV(self.heap_tl(gcrootmap.get_root_stack_top_addr()), eax) # # must restore 'ebp' from its saved value in the shadowstack self._reload_frame_if_necessary(mc) @@ -683,7 +690,7 @@ assert rx86.fits_in_32bits(relative_target) # if not tok.is_guard_not_invalidated: - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() mc.writeimm32(relative_target) mc.copy_to_raw_memory(addr) else: @@ -708,7 +715,7 @@ if WORD == 8: for pos_after_jz in self.pending_memoryerror_trampoline_from: assert self.error_trampoline_64 != 0 # only if non-empty - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() mc.writeimm32(self.error_trampoline_64 - pos_after_jz) mc.copy_to_raw_memory(rawstart + pos_after_jz - 4) @@ -728,7 +735,7 @@ """ descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu) ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr) - mc.CMP_bi(ofs, 0xffffff) # force writing 32 bit + mc.CMP_bi((self.SEGMENT_FRAME, ofs), 0xffffff) # force writing 32 bit stack_check_cmp_ofs = mc.get_relative_pos() - 4 mc.J_il8(rx86.Conditions['GE'], 0) jg_location = mc.get_relative_pos() @@ -751,7 +758,7 @@ return descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu) ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr) - mc.CMP_bi(ofs, 0xffffff) + mc.CMP_bi((self.SEGMENT_FRAME, ofs), 0xffffff) stack_check_cmp_ofs = mc.get_relative_pos() - 4 mc.J_il8(rx86.Conditions['GE'], 0) jg_location = mc.get_relative_pos() @@ -767,7 +774,7 @@ self.frame_depth_to_patch.append(ofs2) def _patch_frame_depth(self, adr, allocated_depth): - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() mc.writeimm32(allocated_depth) mc.copy_to_raw_memory(adr) @@ -792,7 +799,7 @@ # that. Otherwise, leave the original rel32 to the recovery stub in # place, but clobber the recovery stub with a jump to the real # target. 
- mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() if rx86.fits_in_32bits(offset): mc.writeimm32(offset) mc.copy_to_raw_memory(adr_jump_offset) @@ -881,14 +888,11 @@ # there could have been a collection in invalidate_jmp_buf() # reload the frame into eax, while at the same time popping # it off the shadowstack - rst = self.mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) - assert rx86.fits_in_32bits(rst) - self.mc.SEGTL() - self.mc.MOV_rj(ebx.value, rst) + rst = self.heap_tl(gcrootmap.get_root_stack_top_addr()) + self.mc.MOV(ebx, rst) self.mc.SUB_ri(ebx.value, -WORD) self.mc.MOV_rm(eax.value, (ebx.value, 0)) - self.mc.SEGTL() - self.mc.MOV_jr(rst, ebx.value) + self.mc.MOV(rst, ebx) else: # the return value is the jitframe self.mc.MOV_rr(eax.value, ebp.value) @@ -906,46 +910,23 @@ def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): """Loads the shadowstack top in ebx, and returns an integer - that gives the address of the stack top. If this integer doesn't - fit in 32 bits, it will be loaded in r11. + that gives the address of the stack top. """ - rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) - if rx86.fits_in_32bits(rst): - mc.SEGTL() - mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] - else: - mc.MOV_ri(X86_64_SCRATCH_REG.value, rst) # MOV r11, rootstacktop - mc.MOV_rm(ebx.value, (X86_64_SCRATCH_REG.value, 0)) - # MOV ebx, [r11] - # - return rst + mc.MOV(ebx, self.heap_tl(gcrootmap.get_root_stack_top_addr())) def _call_header_shadowstack(self, gcrootmap): # put the frame in ebp on the shadowstack for the GC to find # (ebp is a writeable object and does not need a write-barrier # again (ensured by the code calling the loop)) - rst = self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) + self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp self.mc.ADD_ri(ebx.value, WORD) - - if rx86.fits_in_32bits(rst): - self.mc.SEGTL() - self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx - else: - # The integer 'rst' doesn't fit in 32 bits, so we know that - # _load_shadowstack_top_in_ebx() above loaded it in r11. - # Reuse it. Be careful not to overwrite r11 in the middle! 
- self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), - ebx.value) # MOV [r11], ebx + self.mc.MOV(self.heap_tl(gcrootmap.get_root_stack_top_addr()), ebx) + # MOV [rootstacktop], ebx def _call_footer_shadowstack(self, gcrootmap): - rst = self.mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) - if rx86.fits_in_32bits(rst): - self.mc.SEGTL() - self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD - else: - self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop - self.mc.SUB_mi8((ebx.value, 0), WORD) # SUB [ebx], WORD + self.mc.SUB(self.heap_tl(gcrootmap.get_root_stack_top_addr()), WORD) + # SUB [rootstacktop], WORD def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking @@ -961,7 +942,7 @@ baseofs = self.cpu.get_baseofs_of_frame_field() newlooptoken.compiled_loop_token.update_frame_info( oldlooptoken.compiled_loop_token, baseofs) - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() mc.JMP(imm(target)) if WORD == 4: # keep in sync with prepare_loop() assert mc.get_relative_pos() == 5 @@ -1003,8 +984,8 @@ self.mc.MOVSD_sx(0, loc.value) elif WORD == 4 and isinstance(loc, FrameLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.PUSH_b(loc.value + 4) - self.mc.PUSH_b(loc.value) + self.mc.PUSH_b((loc.segment, loc.value + 4)) + self.mc.PUSH_b((loc.segment, loc.value)) else: self.mc.PUSH(loc) @@ -1014,8 +995,8 @@ self.mc.ADD_ri(esp.value, 8) # = size of doubles elif WORD == 4 and isinstance(loc, FrameLoc) and loc.get_width() == 8: # XXX evil trick - self.mc.POP_b(loc.value) - self.mc.POP_b(loc.value + 4) + self.mc.POP_b((loc.segment, loc.value)) + self.mc.POP_b((loc.segment, loc.value + 4)) else: self.mc.POP(loc) @@ -1028,8 +1009,8 @@ low_part = intmask(low_part) high_part = intmask(high_part) if isinstance(to_loc, RawEbpLoc): - self.mc.MOV32_bi(to_loc.value, low_part) - self.mc.MOV32_bi(to_loc.value + 4, high_part) + self.mc.MOV32_bi((to_loc.segment, to_loc.value), low_part) + self.mc.MOV32_bi((to_loc.segment, to_loc.value + 4), high_part) else: assert isinstance(to_loc, RawEspLoc) self.mc.MOV32_si(to_loc.value, low_part) @@ -1212,10 +1193,8 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack: - rst = mc.in_tl_segment(gcrootmap.get_root_stack_top_addr()) - mc.SEGTL() - mc.MOV(ecx, heap(rst)) - mc.MOV(ebp, mem(ecx, -WORD)) + mc.MOV(ecx, self.heap_tl(gcrootmap.get_root_stack_top_addr())) + mc.MOV(ebp, mem(self.SEGMENT_NO, ecx, -WORD)) # wbdescr = self.cpu.gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: @@ -1408,7 +1387,7 @@ if isinstance(loc, RegLoc): self.mc.MOVD_rx(resloc.value, loc.value) elif isinstance(loc, FrameLoc): - self.mc.MOV_rb(resloc.value, loc.value) + self.mc.MOV(resloc, loc) else: not_implemented("llong_to_int: %s" % (loc,)) @@ -1486,10 +1465,9 @@ # ---------- - def load_from_mem(self, resloc, source_addr, size_loc, sign_loc, op): + def load_from_mem(self, resloc, source_addr, size_loc, sign_loc): size = size_loc.value sign = sign_loc.value - self.mc.SEGC7_if_gc(op) self.generate_one_mov_with_extension(resloc, source_addr, size, sign) def generate_one_mov_with_extension(self, resloc, srcloc, size, sign): @@ -1516,9 +1494,8 @@ else: not_implemented("load_from_mem size = %d" % size) - def save_into_mem(self, dest_addr, value_loc, size_loc, op): + def save_into_mem(self, dest_addr, value_loc, size_loc): size = size_loc.value - self.mc.SEGC7_if_gc(op) if isinstance(value_loc, RegLoc) and value_loc.is_xmm: self.mc.MOVSD(dest_addr, value_loc) elif size == 1: @@ 
-1533,37 +1510,45 @@ else: assert isinstance(value_loc, FloatImmedLoc) self.mc.MOV(dest_addr, value_loc.low_part_loc()) - self.mc.SEGC7_if_gc(op) self.mc.MOV(dest_addr.add_offset(4), value_loc.high_part_loc()) else: not_implemented("save_into_mem size = %d" % size) - def genop_getfield_gc(self, op, arglocs, resloc): + def _genop_getfield(self, arglocs, resloc, segment): base_loc, ofs_loc, size_loc, sign_loc = arglocs assert isinstance(size_loc, ImmedLoc) - source_addr = AddressLoc(base_loc, ofs_loc) - self.load_from_mem(resloc, source_addr, size_loc, sign_loc, op) + source_addr = AddressLoc(segment, base_loc, ofs_loc) + self.load_from_mem(resloc, source_addr, size_loc, sign_loc) - genop_getfield_raw = genop_getfield_gc - genop_getfield_raw_pure = genop_getfield_gc + def genop_getfield_gc(self, op, arglocs, resloc): + self._genop_getfield(arglocs, resloc, self.SEGMENT_GC) + def genop_getfield_raw(self, op, arglocs, resloc): + self._genop_getfield(arglocs, resloc, self.SEGMENT_NO) + genop_getfield_gc_pure = genop_getfield_gc + genop_getfield_raw_pure = genop_getfield_raw - def genop_getarrayitem_gc(self, op, arglocs, resloc): + def _genop_getarrayitem(self, arglocs, resloc, segment): base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) assert isinstance(size_loc, ImmedLoc) scale = get_scale(size_loc.value) - src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale) - self.load_from_mem(resloc, src_addr, size_loc, sign_loc, op) + src_addr = addr_add(segment, base_loc, ofs_loc, ofs.value, scale) + self.load_from_mem(resloc, src_addr, size_loc, sign_loc) + + def genop_getarrayitem_gc(self, op, arglocs, resloc, segment): + self._genop_getarrayitem(arglocs, resloc, self.SEGMENT_GC) + def genop_getarrayitem_raw(self, op, arglocs, resloc, segment): + self._genop_getarrayitem(arglocs, resloc, self.SEGMENT_NO) genop_getarrayitem_gc_pure = genop_getarrayitem_gc - genop_getarrayitem_raw = genop_getarrayitem_gc - genop_getarrayitem_raw_pure = genop_getarrayitem_gc + genop_getarrayitem_raw_pure = genop_getarrayitem_raw def genop_raw_load(self, op, arglocs, resloc): + assert not isinstance(op.getarg(0), BoxPtr) # not for a GC argument! base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) - src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0) + src_addr = addr_add(self.SEGMENT_NO, base_loc, ofs_loc, ofs.value, 0) self.load_from_mem(resloc, src_addr, size_loc, sign_loc, op) def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): @@ -1609,15 +1594,17 @@ shift = self._imul_const_scaled(self.mc, temp_loc.value, index_loc.value, itemsize) assert isinstance(ofs_loc, ImmedLoc) - return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value) + return AddressLoc(self.SEGMENT_GC, base_loc, temp_loc, + shift, ofs_loc.value) def genop_getinteriorfield_gc(self, op, arglocs, resloc): + assert not isinstance(op.getarg(0), BoxInt) # only for a GC argument! (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, temp_loc, sign_loc) = arglocs src_addr = self._get_interiorfield_addr(temp_loc, index_loc, itemsize_loc, base_loc, ofs_loc) - self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc, op) + self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. This should @@ -1625,15 +1612,21 @@ # SETFIELD_RAW. Here we use the direct from-memory-to-memory # increment operation of x86. 
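# [Editor's sketch, not part of the patch: the memory effect of the single
# INC instruction emitted just below; ctypes is used purely to illustrate,
# the JIT never goes through Python here.]
import ctypes

def increment_debug_counter(addr):
    # equivalent of INC qword ptr [addr]: one read-modify-write on memory,
    # without loading the counter into a JIT register first
    ctypes.c_long.from_address(addr).value += 1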
base_loc, = arglocs - self.mc.INC(mem(base_loc, 0)) + self.mc.INC(mem(self.SEGMENT_NO, base_loc, 0)) + + def _genop_discard_setfield(self, arglocs, segment): + base_loc, ofs_loc, size_loc, value_loc = arglocs + assert isinstance(size_loc, ImmedLoc) + dest_addr = AddressLoc(segment, base_loc, ofs_loc) + self.save_into_mem(dest_addr, value_loc, size_loc) def genop_discard_setfield_gc(self, op, arglocs): - base_loc, ofs_loc, size_loc, value_loc = arglocs - assert isinstance(size_loc, ImmedLoc) - dest_addr = AddressLoc(base_loc, ofs_loc) - self.save_into_mem(dest_addr, value_loc, size_loc, op) + self._genop_discard_setfield(arglocs, self.SEGMENT_GC) + def genop_discard_setfield_raw(self, op, arglocs): + self._genop_discard_setfield(arglocs, self.SEGMENT_NO) def genop_discard_setinteriorfield_gc(self, op, arglocs): + assert not isinstance(op.getarg(0), BoxInt) # only for a GC argument! (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, temp_loc, value_loc) = arglocs dest_addr = self._get_interiorfield_addr(temp_loc, index_loc, @@ -1641,28 +1634,34 @@ ofs_loc) self.save_into_mem(dest_addr, value_loc, fieldsize_loc, op) - genop_discard_setinteriorfield_raw = genop_discard_setinteriorfield_gc - - def genop_discard_setarrayitem_gc(self, op, arglocs): + def _genop_discard_setarrayitem(self, arglocs, segment): base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs assert isinstance(baseofs, ImmedLoc) assert isinstance(size_loc, ImmedLoc) scale = get_scale(size_loc.value) - dest_addr = AddressLoc(base_loc, ofs_loc, scale, baseofs.value) - self.save_into_mem(dest_addr, value_loc, size_loc, op) + dest_addr = AddressLoc(segment, base_loc, ofs_loc, + scale, baseofs.value) + self.save_into_mem(dest_addr, value_loc, size_loc) + + def genop_discard_setarrayitem_gc(self, op, arglocs): + self._genop_discard_setarrayitem(arglocs, self.SEGMENT_GC) + def genop_discard_setarrayitem_raw(self, op, arglocs): + self._genop_discard_setarrayitem(arglocs, self.SEGMENT_NO) def genop_discard_raw_store(self, op, arglocs): + assert not isinstance(op.getarg(0), BoxPtr) # not for a GC argument! 
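# [Editor's note: sketch only, not part of the patch.  The recurring pattern
# in these hunks: each load/store helper now takes an explicit segment, and
# thin *_gc / *_raw wrappers pick SEGMENT_GC for objects managed by the
# STM-aware GC and SEGMENT_NO for raw memory.  The SEGMENT_GC value below is
# a placeholder; the real constants come from rx86.]
SEGMENT_NO = '\x00'            # no segment-override prefix
SEGMENT_GC = '<stm-segment>'   # placeholder for the STM segment override

def make_gc_and_raw_variants(common_impl):
    # returns the pair of wrappers that the patch writes out by hand
    def genop_gc(self, op, arglocs):
        common_impl(self, arglocs, SEGMENT_GC)
    def genop_raw(self, op, arglocs):
        common_impl(self, arglocs, SEGMENT_NO)
    return genop_gc, genop_raw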
base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs assert isinstance(baseofs, ImmedLoc) - dest_addr = AddressLoc(base_loc, ofs_loc, 0, baseofs.value) - self.save_into_mem(dest_addr, value_loc, size_loc, op) + dest_addr = AddressLoc(self.SEGMENT_NO, base_loc, ofs_loc, + 0, baseofs.value) + self.save_into_mem(dest_addr, value_loc, size_loc) def genop_discard_strsetitem(self, op, arglocs): base_loc, ofs_loc, val_loc = arglocs basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 - dest_addr = AddressLoc(base_loc, ofs_loc, 0, basesize) + dest_addr = AddressLoc(self.SEGMENT_GC, base_loc, ofs_loc, 0, basesize) self.mc.MOV8(dest_addr, val_loc.lowest8bits()) def genop_discard_unicodesetitem(self, op, arglocs): @@ -1670,15 +1669,14 @@ basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) if itemsize == 4: - self.mc.MOV32(AddressLoc(base_loc, ofs_loc, 2, basesize), val_loc) + self.mc.MOV32(AddressLoc(self.SEGMENT_GC, base_loc, ofs_loc, + 2, basesize), val_loc) elif itemsize == 2: - self.mc.MOV16(AddressLoc(base_loc, ofs_loc, 1, basesize), val_loc) + self.mc.MOV16(AddressLoc(self.SEGMENT_GC, base_loc, ofs_loc, + 1, basesize), val_loc) else: assert 0, itemsize - genop_discard_setfield_raw = genop_discard_setfield_gc - genop_discard_setarrayitem_raw = genop_discard_setarrayitem_gc - def genop_strlen(self, op, arglocs, resloc): base_loc = arglocs[0] basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, @@ -1701,16 +1699,19 @@ basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 - self.mc.MOVZX8(resloc, AddressLoc(base_loc, ofs_loc, 0, basesize)) + self.mc.MOVZX8(resloc, AddressLoc(self.SEGMENT_GC, base_loc, + ofs_loc, 0, basesize)) def genop_unicodegetitem(self, op, arglocs, resloc): base_loc, ofs_loc = arglocs basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) if itemsize == 4: - self.mc.MOV32(resloc, AddressLoc(base_loc, ofs_loc, 2, basesize)) + self.mc.MOV32(resloc, AddressLoc(self.SEGMENT_GC, base_loc, + ofs_loc, 2, basesize)) elif itemsize == 2: - self.mc.MOVZX16(resloc, AddressLoc(base_loc, ofs_loc, 1, basesize)) + self.mc.MOVZX16(resloc, AddressLoc(self.SEGMENT_GC, base_loc, + ofs_loc, 1, basesize)) else: assert 0, itemsize @@ -1733,9 +1734,7 @@ def genop_guard_guard_no_exception(self, ign_1, guard_op, guard_token, locs, ign_2): - ea = self.mc.in_tl_segment(self.cpu.pos_exception()) - self.mc.SEGTL() - self.mc.CMP(heap(ea), imm0) + self.mc.CMP(self.heap_tl(self.cpu.pos_exception()), imm0) self.implement_guard(guard_token, 'NZ') def genop_guard_guard_not_invalidated(self, ign_1, guard_op, guard_token, @@ -1748,9 +1747,7 @@ locs, resloc): loc = locs[0] loc1 = locs[1] - ea = self.mc.in_tl_segment(self.cpu.pos_exception()) - self.mc.SEGTL() - self.mc.MOV(loc1, heap(ea)) + self.mc.MOV(loc1, self.heap_tl(self.cpu.pos_exception())) self.mc.CMP(loc1, loc) self.implement_guard(guard_token, 'NE') self._store_and_reset_exception(self.mc, resloc) @@ -1760,44 +1757,31 @@ """ Resest the exception. 
If excvalloc is None, then store it on the frame in jf_guard_exc """ - eva = mc.in_tl_segment(self.cpu.pos_exc_value()) - ea = mc.in_tl_segment(self.cpu.pos_exception()) - # if excvalloc is not None: assert excvalloc.is_core_reg() - mc.SEGTL() - mc.MOV(excvalloc, heap(eva)) + mc.MOV(excvalloc, self.heap_tl(self.cpu.pos_exc_value())) elif tmploc is not None: # if both are None, just ignore ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') - mc.SEGTL() - mc.MOV(tmploc, heap(eva)) - mc.MOV(RawEbpLoc(ofs), tmploc) + mc.MOV(tmploc, self.heap_tl(self.cpu.pos_exc_value())) + mc.MOV(self.raw_stack(ofs), tmploc) # if exctploc is not None: assert exctploc.is_core_reg() - mc.SEGTL() - mc.MOV(exctploc, heap(ea)) + mc.MOV(exctploc, self.heap_tl(self.cpu.pos_exception())) # - mc.SEGTL() - mc.MOV(heap(ea), imm0) - mc.SEGTL() - mc.MOV(heap(eva), imm0) + mc.MOV(self.heap_tl(self.cpu.pos_exception()), imm0) + mc.MOV(self.heap_tl(self.cpu.pos_exc_value()), imm0) def _restore_exception(self, mc, excvalloc, exctploc, tmploc=None): - eva = mc.in_tl_segment(self.cpu.pos_exc_value()) - ea = mc.in_tl_segment(self.cpu.pos_exception()) if excvalloc is not None: - mc.SEGTL() - mc.MOV(heap(eva), excvalloc) + mc.MOV(heap(self.cpu.pos_exc_value()), excvalloc) else: assert tmploc is not None ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') - mc.MOV(tmploc, RawEbpLoc(ofs)) - mc.MOV_bi(ofs, 0) - mc.SEGTL() - mc.MOV(heap(eva), tmploc) - mc.SEGTL() - mc.MOV(heap(ea), exctploc) + mc.MOV(tmploc, self.raw_stack(ofs)) + mc.MOV_bi((self.SEGMENT_FRAME, ofs), 0) + mc.MOV(self.heap_tl(self.cpu.pos_exc_value()), tmploc) + mc.MOV(self.heap_tl(self.cpu.pos_exception()), exctploc) def _gen_guard_overflow(self, guard_op, guard_token): guard_opnum = guard_op.getopnum() @@ -1838,7 +1822,7 @@ def _cmp_guard_class(self, locs): offset = self.cpu.vtable_offset if offset is not None: - self.mc.CMP(mem(locs[0], offset), locs[1]) + self.mc.CMP(mem(self.SEGMENT_NO, locs[0], offset), locs[1]) else: # XXX hard-coded assumption: to go from an object to its class # we use the following algorithm: @@ -1864,9 +1848,11 @@ expected_typeid = classptr - sizeof_ti - type_info_group if IS_X86_32: expected_typeid >>= 2 - self.mc.CMP16(mem(locs[0], 0), ImmedLoc(expected_typeid)) + self.mc.CMP16(mem(self.SEGMENT_NO, locs[0], 0), + ImmedLoc(expected_typeid)) elif IS_X86_64: - self.mc.CMP32_mi((locs[0].value, 0), expected_typeid) + self.mc.CMP32_mi((self.SEGMENT_NO, locs[0].value, 0), + expected_typeid) def genop_guard_guard_class(self, ign_1, guard_op, guard_token, locs, ign_2): self._cmp_guard_class(locs) @@ -1923,15 +1909,15 @@ else: assert store ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.MOV(raw_stack(ofs), imm(rffi.cast(lltype.Signed, gcmap))) + mc.MOV(self.raw_stack(ofs), imm(rffi.cast(lltype.Signed, gcmap))) def pop_gcmap(self, mc): ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.MOV_bi(ofs, 0) + mc.MOV_bi((self.SEGMENT_FRAME, ofs), 0) def new_stack_loc(self, i, pos, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() - return FrameLoc(i, get_ebp_ofs(base_ofs, i), tp) + return FrameLoc(self.SEGMENT_FRAME, i, get_ebp_ofs(base_ofs, i), tp) def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] @@ -1947,7 +1933,7 @@ for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_br(v * WORD + base_ofs, gpr.value) + mc.MOV_br((self.SEGMENT_FRAME, v * WORD + base_ofs), gpr.value) if withfloats: if IS_X86_64: coeff = 1 @@ -1956,7 +1942,8 @@ # Push all XMM regs ofs = 
len(gpr_reg_mgr_cls.all_regs) for i in range(len(xmm_reg_mgr_cls.all_regs)): - mc.MOVSD_bx((ofs + i * coeff) * WORD + base_ofs, i) + mc.MOVSD_bx((self.SEGMENT_FRAME, + (ofs + i * coeff) * WORD + base_ofs), i) def _pop_all_regs_from_frame(self, mc, ignored_regs, withfloats, callee_only=False): @@ -1969,7 +1956,7 @@ for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_rb(gpr.value, v * WORD + base_ofs) + mc.MOV_rb(gpr.value, (self.SEGMENT_FRAME, v * WORD + base_ofs)) if withfloats: # Pop all XMM regs if IS_X86_64: @@ -1978,27 +1965,23 @@ coeff = 2 ofs = len(gpr_reg_mgr_cls.all_regs) for i in range(len(xmm_reg_mgr_cls.all_regs)): - mc.MOVSD_xb(i, (ofs + i * coeff) * WORD + base_ofs) + mc.MOVSD_xb(i, (self.SEGMENT_FRAME, + (ofs + i * coeff) * WORD + base_ofs)) def _build_failure_recovery(self, exc, withfloats=False): - mc = codebuf.MachineCodeBlockWrapper(self.cpu) + mc = codebuf.MachineCodeBlockWrapper() self.mc = mc self._push_all_regs_to_frame(mc, [], withfloats) if exc: # We might have an exception pending. Load it into ebx... - eva = mc.in_tl_segment(self.cpu.pos_exc_value()) - ea = mc.in_tl_segment(self.cpu.pos_exception()) - mc.SEGTL() - mc.MOV(ebx, heap(eva)) - mc.SEGTL() - mc.MOV(heap(ea), imm0) - mc.SEGTL() - mc.MOV(heap(eva), imm0) + mc.MOV(ebx, self.heap_tl(self.cpu.pos_exc_value())) + mc.MOV(self.heap_tl(self.cpu.pos_exception()), imm0) + mc.MOV(self.heap_tl(self.cpu.pos_exc_value()), imm0) # ...and save ebx into 'jf_guard_exc' offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc') - mc.MOV_br(offset, ebx.value) + mc.MOV_br((self.SEGMENT_FRAME, offset), ebx.value) # now we return from the complete frame, which starts from # _call_header_with_stack_check(). The LEA in _call_footer below @@ -2006,8 +1989,8 @@ # did just above. 
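# [Editor's sketch, not part of the patch: the order of operations in the
# failure-recovery path being built here, written over a dict standing in
# for the jitframe object.  The real code does all of this in machine code.]
def failure_recovery_model(jitframe, saved_registers, pending_exception,
                           pushed_gcmap, pushed_descr):
    jitframe['regs'] = dict(saved_registers)          # _push_all_regs_to_frame
    if pending_exception is not None:
        jitframe['jf_guard_exc'] = pending_exception  # saved via ebx above
    jitframe['jf_gcmap'] = pushed_gcmap               # POP_b into jf_gcmap
    jitframe['jf_descr'] = pushed_descr               # POP_b into jf_descr
    return jitframe                                   # _call_footer() returns it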
ofs = self.cpu.get_ofs_of_frame_field('jf_descr') ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.POP_b(ofs2) - mc.POP_b(ofs) + mc.POP_b((self.SEGMENT_FRAME, ofs2)) + mc.POP_b((self.SEGMENT_FRAME, ofs)) self._call_footer() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -2022,11 +2005,11 @@ size = WORD * 2 else: size = WORD - self.save_into_mem(raw_stack(base_ofs), return_val, imm(size), op) + self.save_into_mem(self.raw_stack(base_ofs), return_val, imm(size)) else: [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mov(fail_descr_loc, RawEbpLoc(ofs)) + self.mov(fail_descr_loc, self.raw_stack(ofs)) arglist = op.getarglist() if arglist and arglist[0].type == REF: if self._finish_gcmap: @@ -2039,7 +2022,7 @@ # note that the 0 here is redundant, but I would rather # keep that one and kill all the others ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') - self.mc.MOV_bi(ofs, 0) + self.mc.MOV_bi((self.SEGMENT_FRAME, ofs), 0) # exit function self._call_footer() @@ -2083,12 +2066,12 @@ def _store_force_index(self, guard_op): faildescr = guard_op.getdescr() ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.MOV(raw_stack(ofs), imm(rffi.cast(lltype.Signed, + self.mc.MOV(self.raw_stack(ofs), imm(rffi.cast(lltype.Signed, cast_instance_to_gcref(faildescr)))) def _emit_guard_not_forced(self, guard_token): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mc.CMP_bi(ofs, 0) + self.mc.CMP_bi((self.SEGMENT_FRAME, ofs), 0) self.implement_guard(guard_token, 'NE') def genop_guard_call_may_force(self, op, guard_op, guard_token, @@ -2162,7 +2145,7 @@ def _call_assembler_check_descr(self, value, tmploc): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mc.CMP(mem(eax, ofs), imm(value)) + self.mc.CMP(mem(self.SEGMENT_FRAME, eax, ofs), imm(value)) # patched later self.mc.J_il8(rx86.Conditions['E'], 0) # goto B if we get 'done_with_this_frame' return self.mc.get_relative_pos() @@ -2170,7 +2153,7 @@ def _call_assembler_patch_je(self, result_loc, je_location): if (IS_X86_32 and isinstance(result_loc, FrameLoc) and result_loc.type == FLOAT): - self.mc.FSTPL_b(result_loc.value) + self.mc.FSTPL_b((result_loc.segment, result_loc.value)) self.mc.JMP_l8(0) # jump to done, patched later jmp_location = self.mc.get_relative_pos() # @@ -2224,7 +2207,7 @@ loc_base = arglocs[0] if is_frame: assert loc_base is ebp - loc = raw_stack(descr.jit_wb_if_flag_byteofs) + loc = self.raw_stack(descr.jit_wb_if_flag_byteofs) else: loc = addr_add_const(loc_base, descr.jit_wb_if_flag_byteofs) mc.TEST8(loc, imm(mask)) @@ -2363,7 +2346,8 @@ if gpr not in should_be_saved: continue v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - self.mc.MOV_br(v * WORD + base_ofs, gpr.value) + self.mc.MOV_br((self.SEGMENT_FRAME, v * WORD + base_ofs), + gpr.value) # # load the 0-to-4 arguments into these registers from rpython.jit.backend.x86.jump import remap_frame_layout @@ -2398,11 +2382,9 @@ def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned - self.mc.SEGC7() - self.mc.MOV(eax, heap(nursery_free_adr)) + self.mc.MOV(eax, heap(self.SEGMENT_GC, nursery_free_adr)) self.mc.LEA_rm(edi.value, (eax.value, size)) - self.mc.SEGC7() - self.mc.CMP(edi, heap(nursery_top_adr)) + self.mc.CMP(edi, heap(self.SEGMENT_GC, nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() # save the gcmap @@ -2411,8 +2393,7 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < 
offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - self.mc.SEGC7() - self.mc.MOV(heap(nursery_free_adr), edi) + self.mc.MOV(heap(self.SEGMENT_GC, nursery_free_adr), edi) def malloc_cond_varsize_frame(self, nursery_free_adr, nursery_top_adr, sizeloc, gcmap): @@ -2421,8 +2402,7 @@ if sizeloc is eax: self.mc.MOV(edi, sizeloc) sizeloc = edi - self.mc.SEGC7() - self.mc.MOV(eax, heap(nursery_free_adr)) + self.mc.MOV(eax, heap(self.SEGMENT_GC, nursery_free_adr)) if self.cpu.gc_ll_descr.stm: constsize = self.cpu.get_baseofs_of_frame_field() shift = get_scale(WORD) @@ -2432,8 +2412,7 @@ self.mc.ADD_rr(edi.value, eax.value) else: self.mc.LEA_ra(edi.value, (eax.value, sizeloc.value, 0, 0)) - self.mc.SEGC7() - self.mc.CMP(edi, heap(nursery_top_adr)) + self.mc.CMP(edi, heap(self.SEGMENT_GC, nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() # save the gcmap @@ -2442,8 +2421,7 @@ offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) - self.mc.SEGC7() - self.mc.MOV(heap(nursery_free_adr), edi) + self.mc.MOV(heap(self.SEGMENT_GC, nursery_free_adr), edi) def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, lengthloc, itemsize, maxlength, gcmap, @@ -2463,8 +2441,7 @@ self.mc.J_il8(rx86.Conditions['A'], 0) # patched later jmp_adr0 = self.mc.get_relative_pos() - self.mc.SEGC7() - self.mc.MOV(eax, heap(nursery_free_adr)) + self.mc.MOV(eax, heap(self.SEGMENT_GC, nursery_free_adr)) if valid_addressing_size(itemsize): shift = get_scale(itemsize) else: @@ -2484,8 +2461,7 @@ self.mc.AND_ri(edi.value, ~(WORD - 1)) # now edi contains the total size in bytes, rounded up to a multiple # of WORD, plus nursery_free_adr - self.mc.SEGC7() - self.mc.CMP(edi, heap(nursery_top_adr)) + self.mc.CMP(edi, heap(self.SEGMENT_GC, nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr1 = self.mc.get_relative_pos() # @@ -2514,10 +2490,9 @@ assert 0 < offset <= 127 self.mc.overwrite(jmp_adr1-1, chr(offset)) # write down the tid, but not if it's the result of the CALL - self.mc.MOV(mem(eax, 0), imm(arraydescr.tid)) + self.mc.MOV(mem(self.SEGMENT_GC, eax, 0), imm(arraydescr.tid)) # while we're at it, this line is not needed if we've done the CALL - self.mc.SEGC7() - self.mc.MOV(heap(nursery_free_adr), edi) + self.mc.MOV(heap(self.SEGMENT_GC, nursery_free_adr), edi) # offset = self.mc.get_relative_pos() - jmp_location assert 0 < offset <= 127 @@ -2568,14 +2543,15 @@ base_ofs = self.cpu.get_baseofs_of_frame_field() for gpr in self._regalloc.rm.reg_bindings.values(): v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_br(v * WORD + base_ofs, gpr.value) + mc.MOV_br((self.SEGMENT_FRAME, v * WORD + base_ofs), gpr.value) if IS_X86_64: coeff = 1 else: coeff = 2 ofs = len(gpr_reg_mgr_cls.all_regs) for xr in self._regalloc.xrm.reg_bindings.values(): - mc.MOVSD_bx((ofs + xr.value * coeff) * WORD + base_ofs, xr.value) + mc.MOVSD_bx((self.SEGMENT_FRAME, + (ofs + xr.value * coeff) * WORD + base_ofs), xr.value) # # CALL break function fn = self.stm_transaction_break_path @@ -2587,14 +2563,15 @@ base_ofs = self.cpu.get_baseofs_of_frame_field() for gpr in self._regalloc.rm.reg_bindings.values(): v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_rb(gpr.value, v * WORD + base_ofs) + mc.MOV_rb(gpr.value, (self.SEGMENT_FRAME, v * WORD + base_ofs)) if IS_X86_64: coeff = 1 else: coeff = 2 ofs = len(gpr_reg_mgr_cls.all_regs) for xr in self._regalloc.xrm.reg_bindings.values(): - 
mc.MOVSD_xb(xr.value, (ofs + xr.value * coeff) * WORD + base_ofs) + mc.MOVSD_xb(xr.value, (self.SEGMENT_FRAME, + (ofs + xr.value * coeff) * WORD + base_ofs)) # # patch the JZ above offset = mc.get_relative_pos() - jz_location2 @@ -2607,17 +2584,15 @@ todo() # "needed for X86_64_SCRATCH_REG" mc = self.mc rmreg = X86_64_SCRATCH_REG.value - mc.SEGC7() - mc.MOVZX8_rj(rmreg, rstm.adr_transaction_read_version) - # + mc.MOVZX8_rj(rmreg, (self.SEGMENT_GC, + rstm.adr_transaction_read_version)) loc_src, loc_tmp = arglocs if loc_tmp is None: assert isinstance(loc_src, ImmedLoc) assert loc_src.value > 0 mem = loc_src.value >> 4 assert rx86.fits_in_32bits(mem) - mc.SEGC7() - mc.MOV8_jr(mem, rmreg | rx86.BYTE_REG_FLAG) + mc.MOV8_jr((self.SEGMENT_GC, mem), rmreg | rx86.BYTE_REG_FLAG) else: assert isinstance(loc_tmp, RegLoc) if isinstance(loc_src, ImmedLoc): @@ -2626,8 +2601,21 @@ if loc_tmp is not loc_src: mc.MOV(loc_tmp, loc_src) mc.SHR_ri(loc_tmp.value, 4) - mc.SEGC7() - mc.MOV8_mr((loc_tmp.value, 0), rmreg | rx86.BYTE_REG_FLAG) + mc.MOV8_mr((self.SEGMENT_GC, loc_tmp.value, 0), + rmreg | rx86.BYTE_REG_FLAG) + + def raw_stack(self, offset, type=INT): + return RawEbpLoc(self.SEGMENT_FRAME, offset, type) + + def heap_tl(self, adr): + """Makes 'adr' relative to threadlocal-base if we run in STM. + Returns a heap()-like AddressLoc. + """ + if self.SEGMENT_TL != self.SEGMENT_NO: + # only for STM and not during tests + adr -= stmtlocal.threadlocal_base() + assert rx86.fits_in_32bits(adr) + return heap(self.SEGMENT_TL, adr) genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST @@ -2659,17 +2647,14 @@ genop_list[num] = value # XXX: ri386 migration shims: -def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0): - return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset) +def addr_add(segment, reg_or_imm1, reg_or_imm2, offset=0, scale=0): + return AddressLoc(segment, reg_or_imm1, reg_or_imm2, scale, offset) -def addr_add_const(reg_or_imm1, offset): - return AddressLoc(reg_or_imm1, imm0, 0, offset) +def addr_add_const(segment, reg_or_imm1, offset): + return AddressLoc(segment, reg_or_imm1, imm0, 0, offset) -def mem(loc, offset): - return AddressLoc(loc, imm0, 0, offset) - -def raw_stack(offset, type=INT): - return RawEbpLoc(offset, type) +def mem(segment, loc, offset): + return AddressLoc(segment, loc, imm0, 0, offset) def heap(segment, addr): return AddressLoc(segment, ImmedLoc(addr), imm0, 0, 0) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -88,12 +88,14 @@ save_around_call_regs = all_regs class X86FrameManager(FrameManager): - def __init__(self, base_ofs): + def __init__(self, base_ofs, segment): FrameManager.__init__(self) self.base_ofs = base_ofs + self.segment = segment def frame_pos(self, i, box_type): - return FrameLoc(i, get_ebp_ofs(self.base_ofs, i), box_type) + return FrameLoc(self.segment, i, + get_ebp_ofs(self.base_ofs, i), box_type) @staticmethod def frame_size(box_type): @@ -134,7 +136,8 @@ def _prepare(self, inputargs, operations, allgcrefs): cpu = self.assembler.cpu - self.fm = X86FrameManager(cpu.get_baseofs_of_frame_field()) + self.fm = X86FrameManager(cpu.get_baseofs_of_frame_field(), + self.assembler.SEGMENT_FRAME) operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables @@ -962,8 +965,6 @@ self.perform_discard(op, [base_loc, ofs, itemsize, fieldsize, index_loc, temp_loc, value_loc]) - 
consider_setinteriorfield_raw = consider_setinteriorfield_gc - def consider_strsetitem(self, op): args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -32,7 +32,7 @@ raise NotImplementedError def value_r(self): return self.value - def value_b(self): return self.value + def value_b(self): raise AssertionError("value_b undefined") def value_s(self): return self.value def value_j(self): raise AssertionError("value_j undefined") def value_i(self): return self.value @@ -58,10 +58,17 @@ _immutable_ = True _location_code = 'b' - def __init__(self, value, type=INT): + def __init__(self, segment, value, type=INT): + if not we_are_translated(): + assert segment in (rx86.SEGMENT_NO, rx86.SEGMENT_FS, + rx86.SEGMENT_GS) + self.segment = segment self.value = value self.type = type + def value_b(self): + return (self.segment, self.value) + def get_width(self): if self.type == FLOAT: return 8 @@ -112,12 +119,13 @@ class FrameLoc(RawEbpLoc): _immutable_ = True - - def __init__(self, position, ebp_offset, type): + + def __init__(self, segment, position, ebp_offset, type): # _getregkey() returns self.value; the value returned must not # conflict with RegLoc._getregkey(). It doesn't a bit by chance, # so let it fail the following assert if it no longer does. assert ebp_offset >= 8 + 8 * IS_X86_64 + self.segment = segment self.position = position #if position != 9999: # assert (position + JITFRAME_FIXED_SIZE) * WORD == ebp_offset diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -338,7 +338,6 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, - rop.SETINTERIORFIELD_RAW, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, rop.CALL_MALLOC_GC, diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -267,7 +267,6 @@ opnum == rop.SETFIELD_RAW or # no effect on GC struct/array opnum == rop.SETARRAYITEM_GC or # handled specially opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct - opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct opnum == rop.RAW_STORE or # no effect on GC struct opnum == rop.STRSETITEM or # no effect on GC struct/array opnum == rop.UNICODESETITEM or # no effect on GC struct/array diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -494,7 +494,6 @@ 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', - 'SETINTERIORFIELD_RAW/3d', # only used by llsupport/rewrite.py 'RAW_STORE/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', From noreply at buildbot.pypy.org Sun Mar 23 16:18:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:18:26 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: More fixes Message-ID: <20140323151826.2FA821C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70203:9b37af797a13 Date: 2014-03-23 16:17 +0100 http://bitbucket.org/pypy/pypy/changeset/9b37af797a13/ Log: More fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ 
b/rpython/jit/backend/x86/assembler.py @@ -157,7 +157,7 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._load_shadowstack_top_in_ebx(mc, gcrootmap) - mc.MOV_mr((ebx.value, -WORD), eax.value) + mc.MOV_mr((self.SEGMENT_NO, ebx.value, -WORD), eax.value) mc.MOV_bi((self.SEGMENT_FRAME, gcmap_ofs), 0) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats) @@ -474,7 +474,8 @@ mc.MOV_rs(eax.value, 3*WORD) else: mc.MOV_rs(eax.value, WORD) - mc.TEST8(addr_add_const(eax, descr.jit_wb_if_flag_byteofs), + mc.TEST8(addr_add_const(self.SEGMENT_GC, eax, + descr.jit_wb_if_flag_byteofs), imm(-0x80)) # @@ -891,7 +892,7 @@ rst = self.heap_tl(gcrootmap.get_root_stack_top_addr()) self.mc.MOV(ebx, rst) self.mc.SUB_ri(ebx.value, -WORD) - self.mc.MOV_rm(eax.value, (ebx.value, 0)) + self.mc.MOV_rm(eax.value, (self.SEGMENT_NO, ebx.value, 0)) self.mc.MOV(rst, ebx) else: # the return value is the jitframe @@ -919,7 +920,8 @@ # (ebp is a writeable object and does not need a write-barrier # again (ensured by the code calling the loop)) self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) - self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp + self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) + # MOV [ebx], ebp self.mc.ADD_ri(ebx.value, WORD) self.mc.MOV(self.heap_tl(gcrootmap.get_root_stack_top_addr()), ebx) # MOV [rootstacktop], ebx @@ -1057,7 +1059,8 @@ resloc, frame_depth) def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0): - self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale)) + self.mc.LEA(result, addr_add(self.SEGMENT_NO, frm, sizereg, + baseofs, scale)) def _unaryop(asmop): def genop_unary(self, op, arglocs, resloc): @@ -1084,7 +1087,8 @@ delta = argloc.value if not is_add: # subtraction delta = -delta - self.mc.LEA_rm(result_loc.value, (loc.value, delta)) + self.mc.LEA_rm(result_loc.value, + (self.SEGMENT_NO, loc.value, delta)) return genop_binary_or_lea def _cmpop(cond, rev_cond): @@ -1773,6 +1777,7 @@ mc.MOV(self.heap_tl(self.cpu.pos_exc_value()), imm0) def _restore_exception(self, mc, excvalloc, exctploc, tmploc=None): + # for _build_wb_slowpath(): don't touch the cpu flags! 
if excvalloc is not None: mc.MOV(heap(self.cpu.pos_exc_value()), excvalloc) else: @@ -2169,13 +2174,14 @@ kind = op.result.type descr = self.cpu.getarraydescr_for_frame(kind) ofs = self.cpu.unpack_arraydescr(descr) + eax_plus_ofs = (self.SEGMENT_FRAME, eax.value, ofs) if kind == FLOAT: - self.mc.MOVSD_xm(xmm0.value, (eax.value, ofs)) + self.mc.MOVSD_xm(xmm0.value, eax_plus_ofs) if result_loc is not xmm0: self.mc.MOVSD(result_loc, xmm0) else: assert result_loc is eax - self.mc.MOV_rm(eax.value, (eax.value, ofs)) + self.mc.MOV_rm(eax.value, eax_plus_ofs) def _call_assembler_patch_jmp(self, jmp_location): offset = self.mc.get_relative_pos() - jmp_location @@ -2383,7 +2389,7 @@ def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(self.SEGMENT_GC, nursery_free_adr)) - self.mc.LEA_rm(edi.value, (eax.value, size)) + self.mc.LEA_rm(edi.value, (self.SEGMENT_NO, eax.value, size)) self.mc.CMP(edi, heap(self.SEGMENT_GC, nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() From noreply at buildbot.pypy.org Sun Mar 23 16:21:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:21:17 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140323152117.85FE51C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70204:05d2f79f365d Date: 2014-03-23 16:20 +0100 http://bitbucket.org/pypy/pypy/changeset/05d2f79f365d/ Log: fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1685,18 +1685,21 @@ base_loc = arglocs[0] basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, self.cpu.translate_support_code) - self.mc.MOV(resloc, addr_add_const(base_loc, ofs_length)) + self.mc.MOV(resloc, + addr_add_const(self.SEGMENT_GC, base_loc, ofs_length)) def genop_unicodelen(self, op, arglocs, resloc): base_loc = arglocs[0] basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) - self.mc.MOV(resloc, addr_add_const(base_loc, ofs_length)) + self.mc.MOV(resloc, + addr_add_const(self.SEGMENT_GC, base_loc, ofs_length)) def genop_arraylen_gc(self, op, arglocs, resloc): base_loc, ofs_loc = arglocs assert isinstance(ofs_loc, ImmedLoc) - self.mc.MOV(resloc, addr_add_const(base_loc, ofs_loc.value)) + self.mc.MOV(resloc, + addr_add_const(self.SEGMENT_GC, base_loc, ofs_loc.value)) def genop_strgetitem(self, op, arglocs, resloc): base_loc, ofs_loc = arglocs @@ -2215,7 +2218,8 @@ assert loc_base is ebp loc = self.raw_stack(descr.jit_wb_if_flag_byteofs) else: - loc = addr_add_const(loc_base, descr.jit_wb_if_flag_byteofs) + loc = addr_add_const(self.SEGMENT_GC, loc_base, + descr.jit_wb_if_flag_byteofs) mc.TEST8(loc, imm(mask)) mc.J_il8(rx86.Conditions['Z'], 0) # patched later jz_location = mc.get_relative_pos() @@ -2284,7 +2288,7 @@ # XOR tmp, -8 mc.XOR_ri(tmp1.value, -8) # BTS [loc_base], tmp - mc.BTS(addr_add_const(loc_base, 0), tmp1) + mc.BTS(addr_add_const(self.SEGMENT_GC, loc_base, 0), tmp1) # done if final_pop: mc.POP_r(loc_index.value) @@ -2293,7 +2297,8 @@ byte_index = loc_index.value >> descr.jit_wb_card_page_shift byte_ofs = ~(byte_index >> 3) byte_val = 1 << (byte_index & 7) - mc.OR8(addr_add_const(loc_base, byte_ofs), imm(byte_val)) + mc.OR8(addr_add_const(self.SEGMENT_GC, loc_base, byte_ofs), + imm(byte_val)) else: raise 
AssertionError("index is neither RegLoc nor ImmedLoc") # From noreply at buildbot.pypy.org Sun Mar 23 16:22:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:22:47 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140323152247.7A5741C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70205:a37be1ce64d8 Date: 2014-03-23 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/a37be1ce64d8/ Log: fix diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -299,6 +299,9 @@ def __init__(self, address): self.value = address + def value_j(self): + return (rx86.SEGMENT_NO, self.value) + def get_width(self): return 8 From noreply at buildbot.pypy.org Sun Mar 23 16:23:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:23:43 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140323152343.B06281C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70206:e9791bb17c6c Date: 2014-03-23 16:23 +0100 http://bitbucket.org/pypy/pypy/changeset/e9791bb17c6c/ Log: fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1540,9 +1540,9 @@ src_addr = addr_add(segment, base_loc, ofs_loc, ofs.value, scale) self.load_from_mem(resloc, src_addr, size_loc, sign_loc) - def genop_getarrayitem_gc(self, op, arglocs, resloc, segment): + def genop_getarrayitem_gc(self, op, arglocs, resloc): self._genop_getarrayitem(arglocs, resloc, self.SEGMENT_GC) - def genop_getarrayitem_raw(self, op, arglocs, resloc, segment): + def genop_getarrayitem_raw(self, op, arglocs, resloc): self._genop_getarrayitem(arglocs, resloc, self.SEGMENT_NO) genop_getarrayitem_gc_pure = genop_getarrayitem_gc From noreply at buildbot.pypy.org Sun Mar 23 16:27:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:27:35 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140323152735.D0C951C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70207:e585aef6450f Date: 2014-03-23 16:27 +0100 http://bitbucket.org/pypy/pypy/changeset/e585aef6450f/ Log: fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1549,7 +1549,7 @@ genop_getarrayitem_raw_pure = genop_getarrayitem_raw def genop_raw_load(self, op, arglocs, resloc): - assert not isinstance(op.getarg(0), BoxPtr) # not for a GC argument! + assert op.getarg(0).type == INT # only for a GC argument! 
base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) src_addr = addr_add(self.SEGMENT_NO, base_loc, ofs_loc, ofs.value, 0) @@ -1571,11 +1571,11 @@ itemsize >>= shift # if valid_addressing_size(itemsize - 1): - mc.LEA_ra(targetreg, (sourcereg, sourcereg, + mc.LEA_ra(targetreg, (self.SEGMENT_NO, sourcereg, sourcereg, get_scale(itemsize - 1), 0)) elif valid_addressing_size(itemsize): - mc.LEA_ra(targetreg, (rx86.NO_BASE_REGISTER, sourcereg, - get_scale(itemsize), 0)) + mc.LEA_ra(targetreg, (self.SEGMENT_NO, rx86.NO_BASE_REGISTER, + sourcereg, get_scale(itemsize), 0)) else: mc.IMUL_rri(targetreg, sourcereg, itemsize) # @@ -1602,7 +1602,7 @@ shift, ofs_loc.value) def genop_getinteriorfield_gc(self, op, arglocs, resloc): - assert not isinstance(op.getarg(0), BoxInt) # only for a GC argument! + assert op.getarg(0).type == REF # only for a GC argument! (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, temp_loc, sign_loc) = arglocs src_addr = self._get_interiorfield_addr(temp_loc, index_loc, @@ -1630,13 +1630,13 @@ self._genop_discard_setfield(arglocs, self.SEGMENT_NO) def genop_discard_setinteriorfield_gc(self, op, arglocs): - assert not isinstance(op.getarg(0), BoxInt) # only for a GC argument! + assert op.getarg(0).type == REF # only for a GC argument! (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, index_loc, temp_loc, value_loc) = arglocs dest_addr = self._get_interiorfield_addr(temp_loc, index_loc, itemsize_loc, base_loc, ofs_loc) - self.save_into_mem(dest_addr, value_loc, fieldsize_loc, op) + self.save_into_mem(dest_addr, value_loc, fieldsize_loc) def _genop_discard_setarrayitem(self, arglocs, segment): base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs @@ -1653,7 +1653,7 @@ self._genop_discard_setarrayitem(arglocs, self.SEGMENT_NO) def genop_discard_raw_store(self, op, arglocs): - assert not isinstance(op.getarg(0), BoxPtr) # not for a GC argument! + assert op.getarg(0).type == INT # only for a GC argument! base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs assert isinstance(baseofs, ImmedLoc) dest_addr = AddressLoc(self.SEGMENT_NO, base_loc, ofs_loc, From noreply at buildbot.pypy.org Sun Mar 23 16:31:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:31:35 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140323153135.BDD3E1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70208:f05956b0d800 Date: 2014-03-23 16:30 +0100 http://bitbucket.org/pypy/pypy/changeset/f05956b0d800/ Log: fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -861,9 +861,11 @@ pass # no stack check (e.g. 
not translated) else: endaddr, lengthaddr, _ = self.cpu.insert_stack_check() - self.mc.MOV(eax, heap(endaddr)) # MOV eax, [start] + assert not self.cpu.gc_ll_descr.stm, "XXX check heap()" + S = self.SEGMENT_NO + self.mc.MOV(eax, heap(S, endaddr)) # MOV eax, [start] self.mc.SUB(eax, esp) # SUB eax, current - self.mc.CMP(eax, heap(lengthaddr)) # CMP eax, [length] + self.mc.CMP(eax, heap(S, lengthaddr)) # CMP eax, [length] self.mc.J_il8(rx86.Conditions['BE'], 0) # JBE .skip jb_location = self.mc.get_relative_pos() self.mc.CALL(imm(self.stack_check_slowpath))# CALL slowpath @@ -1283,11 +1285,13 @@ def genop_float_neg(self, op, arglocs, resloc): # Following what gcc does: res = x ^ 0x8000000000000000 - self.mc.XORPD(arglocs[0], heap(self.float_const_neg_addr)) + self.mc.XORPD(arglocs[0], + heap(self.SEGMENT_NO, self.float_const_neg_addr)) def genop_float_abs(self, op, arglocs, resloc): # Following what gcc does: res = x & 0x7FFFFFFFFFFFFFFF - self.mc.ANDPD(arglocs[0], heap(self.float_const_abs_addr)) + self.mc.ANDPD(arglocs[0], + heap(self.SEGMENT_NO, self.float_const_abs_addr)) def genop_cast_float_to_int(self, op, arglocs, resloc): self.mc.CVTTSD2SI(resloc, arglocs[0]) @@ -1782,7 +1786,7 @@ def _restore_exception(self, mc, excvalloc, exctploc, tmploc=None): # for _build_wb_slowpath(): don't touch the cpu flags! if excvalloc is not None: - mc.MOV(heap(self.cpu.pos_exc_value()), excvalloc) + mc.MOV(self.heap_tl(self.cpu.pos_exc_value()), excvalloc) else: assert tmploc is not None ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') From noreply at buildbot.pypy.org Sun Mar 23 16:34:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:34:10 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140323153410.BC5B11C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70209:d80633d4a761 Date: 2014-03-23 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/d80633d4a761/ Log: fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1557,7 +1557,7 @@ base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) src_addr = addr_add(self.SEGMENT_NO, base_loc, ofs_loc, ofs.value, 0) - self.load_from_mem(resloc, src_addr, size_loc, sign_loc, op) + self.load_from_mem(resloc, src_addr, size_loc, sign_loc) def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): """Produce one operation to do roughly diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -131,7 +131,7 @@ rstm.stop_all_other_threads() for addr, tgt in looptoken.compiled_loop_token.invalidate_positions: - mc = codebuf.MachineCodeBlockWrapper(self) + mc = codebuf.MachineCodeBlockWrapper() mc.JMP_l(tgt) assert mc.get_relative_pos() == 5 # [JMP] [tgt 4 bytes] mc.copy_to_raw_memory(addr - 1) From noreply at buildbot.pypy.org Sun Mar 23 16:40:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:40:00 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: test (locally in the source) and fix Message-ID: <20140323154000.50A191C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70210:93660d35aeb4 Date: 2014-03-23 16:39 +0100 http://bitbucket.org/pypy/pypy/changeset/93660d35aeb4/ Log: test (locally in the source) and fix diff --git a/rpython/jit/backend/x86/regloc.py 
b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -481,11 +481,11 @@ val1 = getattr(loc1, "value_" + possible_code1)() # More faking out of certain operations for x86_64 fits32 = rx86.fits_in_32bits - if possible_code1 == 'j' and not fits32(val1): + if possible_code1 == 'j' and not fits32(val1[1]): val1 = self._addr_as_reg_offset(val1) invoke(self, "m" + possible_code2, val1, val2) return - if possible_code2 == 'j' and not fits32(val2): + if possible_code2 == 'j' and not fits32(val2[1]): val2 = self._addr_as_reg_offset(val2) invoke(self, possible_code1 + "m", val1, val2) return @@ -515,7 +515,7 @@ self._load_scratch(val) # for 'PUSH(imm)' _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value) return - if possible_code == 'j' and not fits32(val): + if possible_code == 'j' and not fits32(val[1]): val = self._addr_as_reg_offset(val) _rx86_getattr(self, name + "_m")(val) return diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -47,6 +47,7 @@ return -128 <= value < 128 def fits_in_32bits(value): + value + 0 # check that it's an integer; crashes if we receive a tuple return -2147483648 <= value <= 2147483647 SEGMENT_NO = '\x00' From noreply at buildbot.pypy.org Sun Mar 23 16:43:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:43:25 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140323154325.D405B1C054C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70211:db9c0c61d0c6 Date: 2014-03-23 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/db9c0c61d0c6/ Log: fixes diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -243,7 +243,7 @@ scale, static_offset) def __repr__(self): - dict = {'j': 'value', 'a': 'loc_a', 'm': 'loc_m', 'a':'loc_a'} + dict = {'j': 'loc_j', 'a': 'loc_a', 'm': 'loc_m', 'a':'loc_a'} attr = dict.get(self._location_code, '?') info = getattr(self, attr, '?') return '' % (self._location_code, info) @@ -402,7 +402,7 @@ val2 = loc2.value_i() code1 = loc1.location_code() if code1 == 'j': - checkvalue = loc1.value_j() + checkvalue = loc1.value_j()[1] elif code1 == 'm': checkvalue = loc1.value_m()[2] elif code1 == 'a': @@ -457,7 +457,7 @@ if loc2 is X86_64_SCRATCH_REG: if code1 == 'j': assert (name.startswith("MOV") and - rx86.fits_in_32bits(loc1.value_j())) + rx86.fits_in_32bits(loc1.value_j()[1])) if loc1 is X86_64_SCRATCH_REG and not name.startswith("MOV"): assert code2 not in ('j', 'i') From noreply at buildbot.pypy.org Sun Mar 23 16:46:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:46:18 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140323154618.26BFB1C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70212:a20b4b8dbf27 Date: 2014-03-23 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/a20b4b8dbf27/ Log: fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2421,12 +2421,13 @@ if self.cpu.gc_ll_descr.stm: constsize = self.cpu.get_baseofs_of_frame_field() shift = get_scale(WORD) - self.mc.LEA_ra(edi.value, (eax.value, sizeloc.value, shift, - constsize)) + self.mc.LEA_ra(edi.value, (self.SEGMENT_NO, 
eax.value, + sizeloc.value, shift, constsize)) elif sizeloc is edi: self.mc.ADD_rr(edi.value, eax.value) else: - self.mc.LEA_ra(edi.value, (eax.value, sizeloc.value, 0, 0)) + self.mc.LEA_ra(edi.value, (self.SEGMENT_NO, eax.value, + sizeloc.value, 0, 0)) self.mc.CMP(edi, heap(self.SEGMENT_GC, nursery_top_adr)) self.mc.J_il8(rx86.Conditions['NA'], 0) # patched later jmp_adr = self.mc.get_relative_pos() @@ -2470,8 +2471,8 @@ force_realignment = (itemsize % WORD) != 0 if force_realignment: constsize += WORD - 1 - self.mc.LEA_ra(edi.value, (eax.value, varsizeloc.value, shift, - constsize)) + self.mc.LEA_ra(edi.value, (self.SEGMENT_NO, eax.value, + varsizeloc.value, shift, constsize)) if force_realignment: self.mc.AND_ri(edi.value, ~(WORD - 1)) # now edi contains the total size in bytes, rounded up to a multiple From noreply at buildbot.pypy.org Sun Mar 23 16:49:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 16:49:09 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: comment Message-ID: <20140323154909.6E2081C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70213:802bc9683f29 Date: 2014-03-23 16:48 +0100 http://bitbucket.org/pypy/pypy/changeset/802bc9683f29/ Log: comment diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -570,6 +570,8 @@ POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) POP_m = insn(rex_nw, '\x8F', orbyte(0<<3), mem_reg_plus_const(1)) + # note: the segment specified in LEA should always be SEGMENT_NO; + # if instead you give it a SEGMENT_*S, it is ignored LEA_rb = insn(rex_w, '\x8D', register(1,8), stack_bp(2)) LEA_rs = insn(rex_w, '\x8D', register(1,8), stack_sp(2)) LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True)) From noreply at buildbot.pypy.org Sun Mar 23 16:59:49 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 23 Mar 2014 16:59:49 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: win32 uses ll_os functions for rposix calls, which work differently Message-ID: <20140323155949.584041C33B0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70214:cf76d42b70fb Date: 2014-03-23 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/cf76d42b70fb/ Log: win32 uses ll_os functions for rposix calls, which work differently - no as_bytes() on str args, - float/double differences with stat() diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -25,7 +25,7 @@ def as_unicode(self): return self.unistr -class BasePosixUnicode: +class BasePosixUnicodeOrAscii: def setup_method(self, method): self.ufilename = self._get_filename() try: @@ -34,9 +34,12 @@ py.test.skip("encoding not good enough") f.write("test") f.close() - - self.path = UnicodeWithEncoding(self.ufilename) - self.path2 = UnicodeWithEncoding(self.ufilename + ".new") + if sys.platform == 'win32' and isinstance(self.ufilename, str): + self.path = self.ufilename + self.path2 = self.ufilename + ".new" + else: + self.path = UnicodeWithEncoding(self.ufilename) + self.path2 = UnicodeWithEncoding(self.ufilename + ".new") def test_open(self): def f(): @@ -55,8 +58,11 @@ def test_stat(self): def f(): return rposix.stat(self.path).st_mtime - - assert interpret(f, []) == os.stat(self.ufilename).st_mtime + if sys.platform == 'win32': + #double vs. 
float, be satisfied with sub-millisec resolution + assert abs(interpret(f, []) - os.stat(self.ufilename).st_mtime) < 1e-4 + else: + assert interpret(f, []) == os.stat(self.ufilename).st_mtime def test_access(self): def f(): @@ -96,7 +102,11 @@ if sys.platform == 'win32': def f(): - return u', '.join(rposix.listdir(udir)) + if isinstance(udir.as_unicode(), str): + _udir = udir.as_unicode() + else: + _udir = udir + return u', '.join(rposix.listdir(_udir)) result = interpret(f, []) assert os.path.basename(self.ufilename) in ll_to_string(result) else: @@ -149,11 +159,11 @@ interpret(f, []) # does not crash -class TestPosixAscii(BasePosixUnicode): +class TestPosixAscii(BasePosixUnicodeOrAscii): def _get_filename(self): return str(udir.join('test_open_ascii')) -class TestPosixUnicode(BasePosixUnicode): +class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): return (unicode(udir.join('test_open')) + u'\u65e5\u672c.txt') # "Japan" From noreply at buildbot.pypy.org Sun Mar 23 17:12:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 17:12:38 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: bug fix Message-ID: <20140323161238.8AA081C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70215:9f7f5339c0f4 Date: 2014-03-23 17:02 +0100 http://bitbucket.org/pypy/pypy/changeset/9f7f5339c0f4/ Log: bug fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -893,7 +893,7 @@ # it off the shadowstack rst = self.heap_tl(gcrootmap.get_root_stack_top_addr()) self.mc.MOV(ebx, rst) - self.mc.SUB_ri(ebx.value, -WORD) + self.mc.SUB_ri(ebx.value, WORD) self.mc.MOV_rm(eax.value, (self.SEGMENT_NO, ebx.value, 0)) self.mc.MOV(rst, ebx) else: From noreply at buildbot.pypy.org Sun Mar 23 17:35:00 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 23 Mar 2014 17:35:00 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: clibffi minimal tests fix Message-ID: <20140323163500.6005F1C3434@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70216:1dcc8039fbaf Date: 2014-03-23 18:34 +0200 http://bitbucket.org/pypy/pypy/changeset/1dcc8039fbaf/ Log: clibffi minimal tests fix diff --git a/rpython/rlib/test/test_clibffi.py b/rpython/rlib/test/test_clibffi.py --- a/rpython/rlib/test/test_clibffi.py +++ b/rpython/rlib/test/test_clibffi.py @@ -423,11 +423,12 @@ def setup_class(cls): if sys.platform != 'win32': py.test.skip("Handle to libc library, Win-only test") - BaseFfiTest.setup_class(cls) + BaseFfiTest.setup_class() def test_get_libc_handle(self): handle = get_libc_handle() print get_libc_name() - print hex(handle) - assert handle != 0 - assert handle % 0x1000 == 0 + print dir(handle) + addr = rffi.cast(rffi.INT, handle) + assert addr != 0 + assert addr % 0x1000 == 0 From noreply at buildbot.pypy.org Sun Mar 23 17:59:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 17:59:41 +0100 (CET) Subject: [pypy-commit] pypy default: Uh, seems that either I'm confused or we wasted one word in the JIT Message-ID: <20140323165941.322691C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70217:a44d490bf21c Date: 2014-03-23 17:53 +0100 http://bitbucket.org/pypy/pypy/changeset/a44d490bf21c/ Log: Uh, seems that either I'm confused or we wasted one word in the JIT frames. 
Not that I care too much, but trying to re-understand the meaning of these constants is not that easy. diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -15,12 +15,12 @@ # # +--------------------+ <== aligned to 16 bytes # | return address | -# +--------------------+ -# | saved regs | -# +--------------------+ -# | scratch | -# | space | -# +--------------------+ <== aligned to 16 bytes +# +--------------------+ ----------------------. +# | saved regs | FRAME_FIXED_SIZE | +# +--------------------+ --------------------. | +# | scratch | PASS_ON_MY_FRAME | | +# | space | | | +# +--------------------+ <== aligned to 16 -----' ----' # All the rest of the data is in a GC-managed variable-size "frame". # This frame object's address is always stored in the register EBP/RBP. @@ -30,14 +30,14 @@ # start of every frame: the saved value of some registers if WORD == 4: - # ebp + ebx + esi + edi + 14 extra words + return address = 19 words + # ebp + ebx + esi + edi + 15 extra words = 19 words FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 14 + PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float else: - # rbp + rbx + r12 + r13 + r14 + r15 + 12 extra words + return address = 19 + # rbp + rbx + r12 + r13 + r14 + r15 + 13 extra words = 19 FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 12 + PASS_ON_MY_FRAME = 13 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 From noreply at buildbot.pypy.org Sun Mar 23 17:59:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 17:59:42 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: hg merge default Message-ID: <20140323165942.CC7091C12A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70218:09a81b58e77a Date: 2014-03-23 17:58 +0100 http://bitbucket.org/pypy/pypy/changeset/09a81b58e77a/ Log: hg merge default diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -70,14 +70,14 @@ p13 = new(descr=...) p15 = new_array(8, descr=) setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -141,15 +141,16 @@ i = 0 b = B(1) while i < 100: - b.x - v = b.x # ID: loadattr + v = b.x # ID: loadattr1 + v = b.x # ID: loadattr2 i += v return i log = self.run(main, [], threshold=80) loop, = log.loops_by_filename(self.filepath) - assert loop.match_by_id('loadattr', + assert loop.match_by_id('loadattr1', ''' + guard_not_invalidated(descr=...) 
i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) guard_no_exception(descr=...) i21 = int_and(i19, _) @@ -161,6 +162,7 @@ i29 = int_is_true(i28) guard_true(i29, descr=...) ''') + assert loop.match_by_id('loadattr2', "") # completely folded away def test_python_contains(self): def main(): diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -584,7 +584,10 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): - # XXX implement me + base_loc, value_loc = arglocs + self.mc.LDR_ri(value_loc.value, base_loc.value, 0, cond=fcond) + self.mc.ADD_ri(value_loc.value, value_loc.value, 1, cond=fcond) + self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -850,8 +850,12 @@ prepare_op_getfield_gc_pure = prepare_op_getfield_gc def prepare_op_increment_debug_counter(self, op, fcond): - # XXX implement me - return [] + boxes = op.getarglist() + a0, = boxes + base_loc = self.make_sure_var_in_reg(a0, boxes) + value_loc = self.get_scratch_reg(INT, boxes) + self.free_temp_vars() + return [base_loc, value_loc] def prepare_op_getinteriorfield_gc(self, op, fcond): t = unpack_interiorfielddescr(op.getdescr()) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -556,7 +556,7 @@ p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) - p2 = call_malloc_nursery_varsize(2, 4, i2, \ + p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) setfield_gc(p2, i2, descr=unicodelendescr) p3 = call_malloc_nursery_varsize(1, 1, i2, \ diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -15,14 +15,14 @@ # # +--------------------+ <== aligned to 16 bytes # | return address | -# +--------------------+ -# | STM resume buf | (16 extra bytes, only with STM) -# +--------------------+ -# | saved regs | -# +--------------------+ -# | scratch | -# | space | -# +--------------------+ <== aligned to 16 bytes +# +--------------------+ ------------------------. +# | resume buf (if STM)| STM_FRAME_FIXED_SIZE | +# +--------------------+ ----------------------. | +# | saved regs | FRAME_FIXED_SIZE | | +# +--------------------+ --------------------. | | +# | scratch | PASS_ON_MY_FRAME | | | +# | space | | | | +# +--------------------+ <== aligned to 16 -----' ----' ----' # STACK TOP # All the rest of the data is in a GC-managed variable-size "frame". 
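The renumbering in the next hunk reads most easily as plain arithmetic: FRAME_FIXED_SIZE counts only what the prologue reserves below the return address, namely the callee-saved registers plus PASS_ON_MY_FRAME scratch words, while the return address pushed by CALL is no longer counted inside the 19. A minimal sanity check, with the register counts copied from the comments in this file (an illustration of the new comments, not code from the changeset):

    # x86-32 saves ebp, ebx, esi, edi -> 4 registers
    assert 4 + 15 == 19   # callee-saved + PASS_ON_MY_FRAME == FRAME_FIXED_SIZE
    # x86-64 saves rbp, rbx, r12, r13, r14, r15 -> 6 registers
    assert 6 + 13 == 19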
@@ -33,16 +33,17 @@ # start of every frame: the saved value of some registers if WORD == 4: - # ebp + ebx + esi + edi + 14 extra words + return address = 19 words + # ebp + ebx + esi + edi + 15 extra words = 19 words FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 14 + PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float else: - # rbp + rbx + r12 + r13 + r14 + r15 + 12 extra words + return address = 19 + # rbp + rbx + r12 + r13 + r14 + r15 + 13 extra words = 19 FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 12 + PASS_ON_MY_FRAME = 13 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 STM_RESUME_BUF = 16 / WORD +STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_RESUME_BUF diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -106,8 +106,10 @@ needs_inevitable=False): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), + frozenset_or_none(readonly_descrs_interiorfields), frozenset_or_none(write_descrs_fields), frozenset_or_none(write_descrs_arrays), + frozenset_or_none(write_descrs_interiorfields), extraeffect, oopspecindex, can_invalidate, @@ -231,6 +233,18 @@ descr = cpu.interiorfielddescrof(T, fieldname) descrs_interiorfields.append(descr) + # a read or a write to an interiorfield, inside an array of + # structs, is additionally recorded as a read or write of + # the array itself + extraef = set() + for tup in effects: + if tup[0] == "interiorfield" or tup[0] == "readinteriorfield": + T = deref(tup[1]) + if isinstance(T, lltype.Array) and consider_array(T): + extraef.add((tup[0].replace("interiorfield", "array"), + tup[1])) + effects |= extraef + for tup in effects: if tup[0] == "struct": add_struct(write_descrs_fields, tup) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1868,8 +1868,7 @@ def _handle_dict_lookup_call(self, op, oopspec_name, args): extradescr1 = self.cpu.fielddescrof(op.args[1].concretetype.TO, 'entries') - extradescr2 = self.cpu.interiorfielddescrof( - op.args[1].concretetype.TO.entries.TO, 'key') + extradescr2 = self.cpu.arraydescrof(op.args[1].concretetype.TO.entries.TO) return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, extradescr=[extradescr1, extradescr2]) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -73,7 +73,7 @@ def guess_call_kind(self, op): return 'residual' def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescr=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): diff --git a/rpython/jit/codewriter/test/test_longlong.py b/rpython/jit/codewriter/test/test_longlong.py --- a/rpython/jit/codewriter/test/test_longlong.py +++ b/rpython/jit/codewriter/test/test_longlong.py @@ -17,7 +17,7 @@ class FakeBuiltinCallControl: def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, extradescr=None): assert oopspecindex is not None # in this test return 'calldescr-%d' % oopspecindex def 
calldescr_canraise(self, calldescr): diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -82,6 +82,9 @@ def _make_log_operations(self): return LogOperations(self.metainterp_sd, self.guard_number) + def repr_of_resop(self, op): + return LogOperations(self.metainterp_sd, self.guard_number).repr_of_resop(op) + class LogOperations(object): """ diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -177,7 +177,7 @@ self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} - # cache of corresponding array descrs + # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # self._lazy_setfields_and_arrayitems = [] @@ -309,12 +309,11 @@ descrs = op.getdescr().get_extra_info().extradescrs assert descrs # translation hint descr1 = descrs[0] - descr2 = descrs[1] - if descr1 in self.cached_dict_reads: + try: d = self.cached_dict_reads[descr1] - else: + except KeyError: d = self.cached_dict_reads[descr1] = args_dict() - self.corresponding_array_descrs[descr2] = descr1 + self.corresponding_array_descrs[descrs[1]] = descr1 args = self.optimizer.make_args_key(op) try: res_v = d[args] @@ -348,9 +347,8 @@ self.force_lazy_setfield(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) - for descr in effectinfo.write_descrs_interiorfields: - if descr in self.corresponding_array_descrs: - dictdescr = self.corresponding_array_descrs.pop(descr) + if arraydescr in self.corresponding_array_descrs: + dictdescr = self.corresponding_array_descrs.pop(arraydescr) try: del self.cached_dict_reads[dictdescr] except KeyError: diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -242,8 +242,9 @@ box = value.box assert isinstance(box, Const) if not box.same_constant(constbox): - raise InvalidLoop('A GUARD_{VALUE,TRUE,FALSE} was proven to' + - 'always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_{VALUE,TRUE,FALSE} (%s) was proven ' + 'to always fail' % r) return if emit_operation: self.emit_operation(op) @@ -255,7 +256,9 @@ if value.is_null(): return elif value.is_nonnull(): - raise InvalidLoop('A GUARD_ISNULL was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always fail' + % r) self.emit_operation(op) value.make_constant(self.optimizer.cpu.ts.CONST_NULL) @@ -264,7 +267,9 @@ if value.is_nonnull(): return elif value.is_null(): - raise InvalidLoop('A GUARD_NONNULL was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always fail' + % r) self.emit_operation(op) value.make_nonnull(op) @@ -292,7 +297,8 @@ assert previous_classbox is not None assert expected_classbox is not None if not previous_classbox.same_constant(expected_classbox): - raise InvalidLoop('A GUARD_VALUE was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A 
GUARD_VALUE (%s) was proven to always fail' % r) op = old_guard_op.copy_and_change(rop.GUARD_VALUE, args = [old_guard_op.getarg(0), op.getarg(1)]) self.optimizer.replaces_guard[op] = old_guard_op @@ -333,7 +339,9 @@ if realclassbox is not None: if realclassbox.same_constant(expectedclassbox): return - raise InvalidLoop('A GUARD_CLASS was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_CLASS (%s) was proven to always fail' + % r) if value.last_guard: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. @@ -356,8 +364,9 @@ def optimize_GUARD_NONNULL_CLASS(self, op): value = self.getvalue(op.getarg(0)) if value.is_null(): - raise InvalidLoop('A GUARD_NONNULL_CLASS was proven to always ' + - 'fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_NONNULL_CLASS (%s) was proven to ' + 'always fail' % r) self.optimize_GUARD_CLASS(op) def optimize_CALL_LOOPINVARIANT(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -319,6 +319,9 @@ def log_loop(*args): pass + class logger_ops: + repr_of_resop = repr + class warmrunnerdesc: class memory_manager: retrace_limit = 5 diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -139,7 +139,13 @@ txt1 = str(op1) txt2 = str(op2) while txt1 or txt2: - print '%s| %s' % (txt1[:width].ljust(width), txt2[:width]) + part1 = txt1[:width] + part2 = txt2[:width] + if part1 == part2: + sep = '| ' + else: + sep = '<>' + print '%s%s%s' % (part1.ljust(width), sep, part2) txt1 = txt1[width:] txt2 = txt2[width:] print '-' * totwidth diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -294,6 +294,54 @@ assert res == f(10) self.check_simple_loop(call=3) + def test_dict_eq_can_release_gil(self): + from rpython.rtyper.lltypesystem import lltype, rffi + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") + T = rffi.CArrayPtr(rffi.TIME_T) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + external(lltype.nullptr(T.TO)) + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + x = 44444 + y = 55555 + z = 66666 + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct[total] = total + x = dct[total] + y = dct[total] + z = dct[total] + total -= 1 + return len(dct) + x + y + z + + res = self.meta_interp(f, [10], listops=True) + assert res == 2 + 1 + 1 + 1 + self.check_simple_loop(call_may_force=4, # ll_dict_lookup_trampoline + call=1) # ll_dict_setitem_lookup_done_trampoline + + def test_bug42(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + def f(n): + mdict = {0: None, 1: None, 2: None, 3: None, 4: None, + 5: None, 6: None, 7: None, 8: None, 9: None} + while n > 0: + myjitdriver.jit_merge_point() + n -= 1 + if n in mdict: + del mdict[n] + if n in mdict: + raise Exception + self.meta_interp(f, [10]) + 
self.check_simple_loop(call_may_force=0, call=3) + class TestLLtype(DictTests, LLJitMixin): pass From noreply at buildbot.pypy.org Sun Mar 23 18:39:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Mar 2014 18:39:12 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140323173912.562771C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70219:98b2f4f06e5e Date: 2014-03-23 18:38 +0100 http://bitbucket.org/pypy/pypy/changeset/98b2f4f06e5e/ Log: in-progress diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -45,5 +45,15 @@ assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 -STM_RESUME_BUF = 16 / WORD -STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_RESUME_BUF +# The STM resume buffer (on x86-64) is two words wide. Actually, clang +# uses three words (see test_stm.py): rbp, rip, rsp. But the value of +# rbp is not interesting for the JIT-generated machine code. So the +# STM_JMPBUF_OFS is the offset from the stack top to the start of the +# buffer, with only words at offset +1 and +2 in this buffer being +# meaningful -- these are the two words overlapping the STM resume +# buffer's location in the diagram above. +STM_RESUME_BUF_WORDS = 16 / WORD +STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_RESUME_BUF_WORDS +STM_JMPBUF_OFS = WORD * (FRAME_FIXED_SIZE - 1) +STM_JMPBUF_OFS_RIP = STM_JMPBUF_OFS + 1 * WORD +STM_JMPBUF_OFS_RSP = STM_JMPBUF_OFS + 2 * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -16,9 +16,9 @@ from rpython.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, gpr_reg_mgr_cls, xmm_reg_mgr_cls) from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) -from rpython.jit.backend.x86.arch import (FRAME_FIXED_SIZE, WORD, IS_X86_64, - JITFRAME_FIXED_SIZE, IS_X86_32, - PASS_ON_MY_FRAME, STM_RESUME_BUF) +from rpython.jit.backend.x86.arch import ( + FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, + PASS_ON_MY_FRAME, STM_FRAME_FIXED_SIZE) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -503,37 +503,6 @@ self.wb_slowpath[withcards + 2 * withfloats] = rawstart - def _build_stm_longjmp_callback(self): - assert self.cpu.gc_ll_descr.stm - if not we_are_translated(): - return # tests only - # - # make the stm_longjmp_callback() function, with signature - # void (*longjmp_callback)(void *stm_resume_buffer) - mc = codebuf.MachineCodeBlockWrapper() - # - # 'edi' contains the stm resume buffer, so the new stack - # location that we have to enforce is 'edi - FRAME_FIXED_SIZE * WORD'. 
- if IS_X86_32: - mc.MOV_rs(edi.value, WORD) # first argument - mc.MOV_rr(esp.value, edi.value) - mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) - # - # restore the shadowstack pointer from stm_resume_buffer[1] - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE + 1) * WORD) - mc.MOV(self.heap_tl(gcrootmap.get_root_stack_top_addr()), eax) - # - # must restore 'ebp' from its saved value in the shadowstack - self._reload_frame_if_necessary(mc) - # - # jump to the place saved in stm_resume_buffer[0] - # (to "HERE" in genop_stm_transaction_break()) - mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE + 0) * WORD) - mc.JMP_r(eax.value) - self.stm_longjmp_callback_addr = mc.materialize(self.cpu.asmmemmgr, []) - - @rgc.no_release_gil def assemble_loop(self, inputargs, operations, looptoken, log, loopname, logger): @@ -835,10 +804,10 @@ return frame_depth def _get_whole_frame_size(self): - frame_size = FRAME_FIXED_SIZE if self.cpu.gc_ll_descr.stm: - frame_size += STM_RESUME_BUF - return frame_size + return STM_FRAME_FIXED_SIZE + else: + return FRAME_FIXED_SIZE def _call_header(self): self.mc.SUB_ri(esp.value, self._get_whole_frame_size() * WORD) @@ -881,27 +850,42 @@ # call stm_invalidate_jmp_buf(), in case we called # stm_transaction_break() earlier assert IS_X86_64 - # load the address of the STM_RESUME_BUF - self.mc.LEA_rs(edi.value, FRAME_FIXED_SIZE * WORD) + # + # load the shadowstack pointer into ebx, and decrement it, + # but don't decrement the official shadowstack yet! We just + # keep it in ebx for a while (a callee-saved register). + mc = self.mc + rst = self.heap_tl(gcrootmap.get_root_stack_top_addr()) + mc.MOV(ebx, rst) + mc.SUB_ri(ebx.value, WORD) + # load the address of the jmpbuf + mc.LEA_rs(edi.value, STM_JMPBUF_OFS) + # compare it with the currently-stored jmpbuf + mc.CMP_rj(edi.value, (self.SEGMENT_GC, rstm.adr_jmpbuf_ptr)) + # if they differ (or if jmpbuf_ptr is already NULL), nothing to do + mc.J_il(rx86.Conditions['NE'], 0) # patched later + jne_location = mc.get_relative_pos() + # + # if they are equal, we need to become inevitable now + mc.MOV_rj(edi.value, (self.SEGMENT_NO, rstm.adr_jit_default_msg)) + mc.CALL(imm(rstm.adr__stm_become_inevitable)) + # there could have been a collection in _stm_become_inevitable; + # reload the frame into ebp + mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx.value, 0)) + # + # this is where the JNE above jumps + offset = mc.get_relative_pos() - jne_location + assert 0 < offset <= 127 + mc.overwrite(jne_location-1, chr(offset)) + # + # now store ebx back, which will really decrement the shadowstack + mc.MOV(rst, ebx) - # XXX UD2 - #fn = stmtlocal.stm_invalidate_jmp_buf_fn - #self.mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + elif gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) - # there could have been a collection in invalidate_jmp_buf() - # reload the frame into eax, while at the same time popping - # it off the shadowstack - rst = self.heap_tl(gcrootmap.get_root_stack_top_addr()) - self.mc.MOV(ebx, rst) - self.mc.SUB_ri(ebx.value, WORD) - self.mc.MOV_rm(eax.value, (self.SEGMENT_NO, ebx.value, 0)) - self.mc.MOV(rst, ebx) - else: - # the return value is the jitframe - self.mc.MOV_rr(eax.value, ebp.value) - # - if gcrootmap and gcrootmap.is_shadow_stack: - self._call_footer_shadowstack(gcrootmap) + # the return value is the jitframe + self.mc.MOV_rr(eax.value, ebp.value) for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.MOV_rs(self.cpu.CALLEE_SAVE_REGISTERS[i].value, 
diff --git a/rpython/jit/backend/x86/stmtlocal.py b/rpython/jit/backend/x86/stmtlocal.py --- a/rpython/jit/backend/x86/stmtlocal.py +++ b/rpython/jit/backend/x86/stmtlocal.py @@ -1,4 +1,4 @@ -from rpython.rtyper.lltypesystem import lltype, rffi, llmemory +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.jit.backend.x86.arch import WORD @@ -24,29 +24,3 @@ [], lltype.Signed, compilation_info=eci, _nowrapper=True) - - -def tl_segment_prefix(mc): - if WORD == 4: - mc.writechar('\x65') # %gs: - else: - mc.writechar('\x64') # %fs: - -def c7_segment_prefix(mc): - assert WORD == 8 - mc.writechar('\x65') # %gs: - - -# special STM functions called directly by the JIT backend -stm_should_break_transaction_fn = rffi.llexternal( - 'stm_should_break_transaction', - [], lltype.Bool, - sandboxsafe=True, _nowrapper=True, transactionsafe=True) -stm_transaction_break_fn = rffi.llexternal( - 'stm_transaction_break', - [llmemory.Address, llmemory.Address], lltype.Void, - sandboxsafe=True, _nowrapper=True, transactionsafe=True) -stm_invalidate_jmp_buf_fn = rffi.llexternal( - 'stm_invalidate_jmp_buf', - [llmemory.Address], lltype.Void, - sandboxsafe=True, _nowrapper=True, transactionsafe=True) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -8,11 +8,18 @@ TID = rffi.UINT tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)') -adr_nursery_free = CDefinedIntSymbolic('(long)(&STM_SEGMENT->nursery_current)') -adr_nursery_top = CDefinedIntSymbolic('(long)(&STM_SEGMENT->nursery_end)') +adr_nursery_free = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_current)') +adr_nursery_top = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_transaction_read_version = ( - CDefinedIntSymbolic('(long)(&STM_SEGMENT->transaction_read_version)')) -adr_write_slowpath = CDefinedIntSymbolic('(long)(&_stm_write_slowpath)') + CDefinedIntSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) +adr_jmpbuf_ptr = ( + CDefinedIntSymbolic('((long)&STM_SEGMENT->jmpbuf_ptr)')) +adr_write_slowpath = CDefinedIntSymbolic('((long)&_stm_write_slowpath)') + +adr_jit_default_msg = ( + CDefinedIntSymbolic('((long)"return from JITted function")')) +adr__stm_become_inevitable = ( + CDefinedIntSymbolic('((long)&_stm_become_inevitable)') def jit_stm_transaction_break_point(): From noreply at buildbot.pypy.org Mon Mar 24 01:24:11 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Mon, 24 Mar 2014 01:24:11 +0100 (CET) Subject: [pypy-commit] pypy test_SetFromErrnoWithFilename__tweaks: Tweaks to test_SetFromErrnoWithFilename Message-ID: <20140324002411.D1CEE1C3396@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: test_SetFromErrnoWithFilename__tweaks Changeset: r70220:cbd73b7272c6 Date: 2014-03-23 17:16 -0700 http://bitbucket.org/pypy/pypy/changeset/cbd73b7272c6/ Log: Tweaks to test_SetFromErrnoWithFilename - Use `raises` to ensure that OSError is raised - Don't skip if sys.platform != 'win32' - Use "/path/to/file" instead of "blyf" for filename to make it clearer that it's a filename diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -199,26 +199,21 @@ assert e.filename == None def test_SetFromErrnoWithFilename(self): - import sys - if sys.platform != 'win32': - skip("callbacks through ll2ctypes modify errno") import 
errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; - PyErr_SetFromErrnoWithFilename(PyExc_OSError, "blyf"); + PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/file"); return NULL; '''), ], prologue="#include ") - try: - module.set_from_errno() - except OSError, e: - assert e.filename == "blyf" - assert e.errno == errno.EBADF - assert e.strerror == os.strerror(errno.EBADF) + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_PyErr_Display(self): module = self.import_extension('foo', [ From noreply at buildbot.pypy.org Mon Mar 24 01:24:13 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 24 Mar 2014 01:24:13 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in msabramo/pypy/test_SetFromErrnoWithFilename__tweaks (pull request #210) Message-ID: <20140324002413.311FC1C3396@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70221:556f9ed20ccc Date: 2014-03-23 17:23 -0700 http://bitbucket.org/pypy/pypy/changeset/556f9ed20ccc/ Log: Merged in msabramo/pypy/test_SetFromErrnoWithFilename__tweaks (pull request #210) Tweaks to test_SetFromErrnoWithFilename diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -199,26 +199,21 @@ assert e.filename == None def test_SetFromErrnoWithFilename(self): - import sys - if sys.platform != 'win32': - skip("callbacks through ll2ctypes modify errno") import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; - PyErr_SetFromErrnoWithFilename(PyExc_OSError, "blyf"); + PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/file"); return NULL; '''), ], prologue="#include ") - try: - module.set_from_errno() - except OSError, e: - assert e.filename == "blyf" - assert e.errno == errno.EBADF - assert e.strerror == os.strerror(errno.EBADF) + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_PyErr_Display(self): module = self.import_extension('foo', [ From noreply at buildbot.pypy.org Mon Mar 24 07:30:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 07:30:50 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add __pypy__.thread.getsegmentlimit() Message-ID: <20140324063050.4BD9B1C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70223:4eb53be43a92 Date: 2014-03-24 07:29 +0100 http://bitbucket.org/pypy/pypy/changeset/4eb53be43a92/ Log: Add __pypy__.thread.getsegmentlimit() diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -34,6 +34,15 @@ ------------------------------------------------------------ +__pypy__.thread.getsegmentlimit(): + +XXX This limit is so far a compile time option (STM_NB_SEGMENTS in +rpython/translator/stm/src_stm/stmgc.h), but this should instead be +based on the machine found at run-time. We should also be able to +change the limit (or at least lower it) with setsegmentlimit(). 
+ +------------------------------------------------------------ + diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -41,6 +41,7 @@ '_atomic_exit': 'interp_atomic.atomic_exit', 'last_abort_info': 'interp_atomic.last_abort_info', 'discard_last_abort_info': 'interp_atomic.discard_last_abort_info', + 'getsegmentlimit': 'interp_atomic.getsegmentlimit', } diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -41,6 +41,24 @@ raise wrap_thread_error(space, "atomic.__exit__(): more exits than enters") +def getsegmentlimit(space): + '''Return the number of "segments" this PyPy is running with. + +With STM, multithreaded Python code executes on multiple segments in +parallel. This function gives the limit above which more threads will not +be able to execute on more cores. In a non-STM PyPy, this limit is 1. + +XXX This limit is so far a compile time option (STM_NB_SEGMENTS in +rpython/translator/stm/src_stm/stmgc.h), but this should instead be +based on the machine found at run-time. We should also be able to +change the limit (or at least lower it) with setsegmentlimit(). +''' + if space.config.translation.stm: + from rpython.rlib.rstm import stm_nb_segments + return space.wrap(stm_nb_segments) + else: + return space.wrap(1) + def last_abort_info(space): from rpython.rlib.rstm import charp_inspect_abort_info p = charp_inspect_abort_info() diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -8,6 +8,7 @@ TID = rffi.UINT tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)') +stm_nb_segments = CDefinedIntSymbolic('STM_NB_SEGMENTS') adr_nursery_free = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_current)') adr_nursery_top = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_transaction_read_version = ( From noreply at buildbot.pypy.org Mon Mar 24 07:38:43 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Mon, 24 Mar 2014 07:38:43 +0100 (CET) Subject: [pypy-commit] pypy add_PyErr_SetFromErrnoWithFilenameObject_try_2: Add PyErr_SetFromErrnoWithFilenameObject to cpyext Message-ID: <20140324063843.8D1A61C0225@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 Changeset: r70224:0bc8a372d4b2 Date: 2014-03-23 18:12 -0700 http://bitbucket.org/pypy/pypy/changeset/0bc8a372d4b2/ Log: Add PyErr_SetFromErrnoWithFilenameObject to cpyext diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -164,6 +164,17 @@ space.wrap(msg)) raise OperationError(w_type, w_error) + at cpython_api([PyObject, PyObject], PyObject) +def PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename_object): + """Similar to PyErr_SetFromErrno(), with the additional behavior that if + filename_object is not NULL, it is passed to the constructor of type as a + third parameter. In the case of exceptions such as IOError and OSError, + this is used to define the filename attribute of the exception instance. 
+ Return value: always NULL.""" + from pypy.module.cpyext.stringobject import PyString_AsString + PyErr_SetFromErrnoWithFilename(space, w_type, + PyString_AsString(space, filename_object)) + @cpython_api([], rffi.INT_real, error=-1) def PyErr_CheckSignals(space): """ diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -215,6 +215,25 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_SetFromErrnoWithFilenameObject(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *filenameObject = PyString_FromString("/path/to/file"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); + Py_DECREF(filenameObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_PyErr_Display(self): module = self.import_extension('foo', [ ("display_error", "METH_VARARGS", From noreply at buildbot.pypy.org Mon Mar 24 07:38:44 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Mon, 24 Mar 2014 07:38:44 +0100 (CET) Subject: [pypy-commit] pypy add_PyErr_SetFromErrnoWithFilenameObject_try_2: Make PyErr_SetFromErrnoWithFilenameObject work with any Python object; not just strings Message-ID: <20140324063844.EA6921C0225@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 Changeset: r70225:a00eb16f44ee Date: 2014-03-23 23:29 -0700 http://bitbucket.org/pypy/pypy/changeset/a00eb16f44ee/ Log: Make PyErr_SetFromErrnoWithFilenameObject work with any Python object; not just strings diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -165,15 +165,24 @@ raise OperationError(w_type, w_error) @cpython_api([PyObject, PyObject], PyObject) -def PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename_object): +def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): """Similar to PyErr_SetFromErrno(), with the additional behavior that if - filename_object is not NULL, it is passed to the constructor of type as a + w_value is not NULL, it is passed to the constructor of type as a third parameter. In the case of exceptions such as IOError and OSError, this is used to define the filename attribute of the exception instance. 
Return value: always NULL.""" - from pypy.module.cpyext.stringobject import PyString_AsString - PyErr_SetFromErrnoWithFilename(space, w_type, - PyString_AsString(space, filename_object)) + errno = get_errno() + msg = os.strerror(errno) + if w_value: + w_error = space.call_function(w_type, + space.wrap(errno), + space.wrap(msg), + w_value) + else: + w_error = space.call_function(w_type, + space.wrap(errno), + space.wrap(msg)) + raise OperationError(w_type, w_error) @cpython_api([], rffi.INT_real, error=-1) def PyErr_CheckSignals(space): diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -215,7 +215,7 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) - def test_SetFromErrnoWithFilenameObject(self): + def test_SetFromErrnoWithFilenameObject__PyString(self): import errno, os module = self.import_extension('foo', [ @@ -234,6 +234,82 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_SetFromErrnoWithFilenameObject__PyInt(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *intObject = PyInt_FromLong(3); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); + Py_DECREF(intObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == 3 + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyList(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); + Py_DECREF(lst); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == [1, 2, "three"] + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyTuple(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); + Py_DECREF(tuple); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == (1, 2, "three") + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__Py_None(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *none = Py_BuildValue(""); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); + Py_DECREF(none); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename is None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_PyErr_Display(self): module = self.import_extension('foo', [ ("display_error", "METH_VARARGS", From noreply at 
buildbot.pypy.org Mon Mar 24 07:38:46 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 24 Mar 2014 07:38:46 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in msabramo/pypy/add_PyErr_SetFromErrnoWithFilenameObject_try_2 (pull request #211) Message-ID: <20140324063846.44D821C0225@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70226:0f14db5ced36 Date: 2014-03-23 23:38 -0700 http://bitbucket.org/pypy/pypy/changeset/0f14db5ced36/ Log: Merged in msabramo/pypy/add_PyErr_SetFromErrnoWithFilenameObject_try_2 (pull request #211) Add PyErr_SetFromErrnoWithFilenameObject to cpyext diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -164,6 +164,26 @@ space.wrap(msg)) raise OperationError(w_type, w_error) + at cpython_api([PyObject, PyObject], PyObject) +def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): + """Similar to PyErr_SetFromErrno(), with the additional behavior that if + w_value is not NULL, it is passed to the constructor of type as a + third parameter. In the case of exceptions such as IOError and OSError, + this is used to define the filename attribute of the exception instance. + Return value: always NULL.""" + errno = get_errno() + msg = os.strerror(errno) + if w_value: + w_error = space.call_function(w_type, + space.wrap(errno), + space.wrap(msg), + w_value) + else: + w_error = space.call_function(w_type, + space.wrap(errno), + space.wrap(msg)) + raise OperationError(w_type, w_error) + @cpython_api([], rffi.INT_real, error=-1) def PyErr_CheckSignals(space): """ diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -215,6 +215,101 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_SetFromErrnoWithFilenameObject__PyString(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *filenameObject = PyString_FromString("/path/to/file"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); + Py_DECREF(filenameObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyInt(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *intObject = PyInt_FromLong(3); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); + Py_DECREF(intObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == 3 + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyList(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); + Py_DECREF(lst); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, 
module.set_from_errno) + assert exc_info.value.filename == [1, 2, "three"] + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyTuple(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); + Py_DECREF(tuple); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == (1, 2, "three") + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__Py_None(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *none = Py_BuildValue(""); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); + Py_DECREF(none); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename is None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_PyErr_Display(self): module = self.import_extension('foo', [ ("display_error", "METH_VARARGS", From noreply at buildbot.pypy.org Mon Mar 24 08:16:47 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Mon, 24 Mar 2014 08:16:47 +0100 (CET) Subject: [pypy-commit] pypy test_SetFromErrnoWithFilename_NULL: Add test_SetFromErrnoWithFilename_NULL Message-ID: <20140324071647.B41301C0EE9@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: test_SetFromErrnoWithFilename_NULL Changeset: r70228:322c7911932a Date: 2014-03-24 00:12 -0700 http://bitbucket.org/pypy/pypy/changeset/322c7911932a/ Log: Add test_SetFromErrnoWithFilename_NULL Tests that PyErr_SetFromErrnoWithFilename with filename == NULL works diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -215,6 +215,23 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_SetFromErrnoWithFilename_NULL(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_SetFromErrnoWithFilenameObject__PyString(self): import errno, os From noreply at buildbot.pypy.org Mon Mar 24 08:16:49 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 24 Mar 2014 08:16:49 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in msabramo/pypy/test_SetFromErrnoWithFilename_NULL (pull request #212) Message-ID: <20140324071649.014EC1C0EE9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70229:be52f96491e8 Date: 2014-03-24 00:16 -0700 http://bitbucket.org/pypy/pypy/changeset/be52f96491e8/ Log: Merged in msabramo/pypy/test_SetFromErrnoWithFilename_NULL (pull request #212) Add test_SetFromErrnoWithFilename_NULL diff --git 
a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -215,6 +215,23 @@ assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_SetFromErrnoWithFilename_NULL(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + def test_SetFromErrnoWithFilenameObject__PyString(self): import errno, os From noreply at buildbot.pypy.org Mon Mar 24 08:17:50 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 24 Mar 2014 08:17:50 +0100 (CET) Subject: [pypy-commit] pypy default: slightly more idiomatic Message-ID: <20140324071750.DFD7E1C0EE9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70231:735f901dbac2 Date: 2014-03-24 00:17 -0700 http://bitbucket.org/pypy/pypy/changeset/735f901dbac2/ Log: slightly more idiomatic diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -196,7 +196,7 @@ except OSError, e: assert e.errno == errno.EBADF assert e.strerror == os.strerror(errno.EBADF) - assert e.filename == None + assert e.filename is None def test_SetFromErrnoWithFilename(self): import errno, os @@ -228,7 +228,7 @@ ], prologue="#include ") exc_info = raises(OSError, module.set_from_errno) - assert exc_info.value.filename == None + assert exc_info.value.filename is None assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) From noreply at buildbot.pypy.org Mon Mar 24 08:27:09 2014 From: noreply at buildbot.pypy.org (Marc Abramowitz) Date: Mon, 24 Mar 2014 08:27:09 +0100 (CET) Subject: [pypy-commit] pypy refactor_PyErr_SetFromErrnoWithFilename: Refactor PyErr_SetFromErrnoWithFilename in terms of PyErr_SetFromErrnoWithFilenameObject Message-ID: <20140324072709.8D4EA1C0EE9@cobra.cs.uni-duesseldorf.de> Author: Marc Abramowitz Branch: refactor_PyErr_SetFromErrnoWithFilename Changeset: r70232:cbc7da63cd1c Date: 2014-03-24 00:23 -0700 http://bitbucket.org/pypy/pypy/changeset/cbc7da63cd1c/ Log: Refactor PyErr_SetFromErrnoWithFilename in terms of PyErr_SetFromErrnoWithFilenameObject diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -150,19 +150,13 @@ this is used to define the filename attribute of the exception instance. Return value: always NULL.""" # XXX Doesn't actually do anything with PyErr_CheckSignals. 
- errno = get_errno() - msg = os.strerror(errno) if llfilename: w_filename = rffi.charp2str(llfilename) - w_error = space.call_function(w_type, - space.wrap(errno), - space.wrap(msg), - space.wrap(w_filename)) + filename = space.wrap(w_filename) else: - w_error = space.call_function(w_type, - space.wrap(errno), - space.wrap(msg)) - raise OperationError(w_type, w_error) + filename = space.w_None + + PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename) @cpython_api([PyObject, PyObject], PyObject) def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): @@ -171,6 +165,7 @@ third parameter. In the case of exceptions such as IOError and OSError, this is used to define the filename attribute of the exception instance. Return value: always NULL.""" + # XXX Doesn't actually do anything with PyErr_CheckSignals. errno = get_errno() msg = os.strerror(errno) if w_value: From noreply at buildbot.pypy.org Mon Mar 24 08:27:10 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 24 Mar 2014 08:27:10 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in msabramo/pypy/refactor_PyErr_SetFromErrnoWithFilename (pull request #213) Message-ID: <20140324072710.BA2A71C0EE9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r70233:fe72a15b0882 Date: 2014-03-24 00:26 -0700 http://bitbucket.org/pypy/pypy/changeset/fe72a15b0882/ Log: Merged in msabramo/pypy/refactor_PyErr_SetFromErrnoWithFilename (pull request #213) Refactor PyErr_SetFromErrnoWithFilename in terms of PyErr_SetFromErrnoWithFilenameObject diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -150,19 +150,13 @@ this is used to define the filename attribute of the exception instance. Return value: always NULL.""" # XXX Doesn't actually do anything with PyErr_CheckSignals. - errno = get_errno() - msg = os.strerror(errno) if llfilename: w_filename = rffi.charp2str(llfilename) - w_error = space.call_function(w_type, - space.wrap(errno), - space.wrap(msg), - space.wrap(w_filename)) + filename = space.wrap(w_filename) else: - w_error = space.call_function(w_type, - space.wrap(errno), - space.wrap(msg)) - raise OperationError(w_type, w_error) + filename = space.w_None + + PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename) @cpython_api([PyObject, PyObject], PyObject) def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): @@ -171,6 +165,7 @@ third parameter. In the case of exceptions such as IOError and OSError, this is used to define the filename attribute of the exception instance. Return value: always NULL.""" + # XXX Doesn't actually do anything with PyErr_CheckSignals. errno = get_errno() msg = os.strerror(errno) if w_value: From noreply at buildbot.pypy.org Mon Mar 24 10:45:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 10:45:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Mark these as needing refactoring Message-ID: <20140324094520.724C81C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70235:5279e26ea559 Date: 2014-03-24 08:05 +0100 http://bitbucket.org/pypy/pypy/changeset/5279e26ea559/ Log: Mark these as needing refactoring diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -70,11 +70,6 @@ stmonly_jitdriver.jit_merge_point( self=self, co_code=co_code, next_instr=next_instr, ec=ec) - # nothing inbetween! 
- # XXX REMOVED TEMPORARILY? - #if rstm.jit_stm_should_break_transaction(False): - # rstm.jit_stm_transaction_break_point() - #self = self._hints_for_stm() next_instr = self.handle_bytecode(co_code, next_instr, ec) except ExitFrame: return self.popvalue() diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -24,14 +24,16 @@ def jit_stm_transaction_break_point(): + # XXX REFACTOR AWAY if we_are_translated(): llop.jit_stm_transaction_break_point(lltype.Void) def jit_stm_should_break_transaction(if_there_is_no_other): + # XXX REFACTOR AWAY # if_there_is_no_other means that we use this point only # if there is no other break point in the trace. # If it is False, the point may be used if it comes right - # a CALL_RELEASE_GIL + # after a CALL_RELEASE_GIL return llop.jit_stm_should_break_transaction(lltype.Bool, if_there_is_no_other) From noreply at buildbot.pypy.org Mon Mar 24 10:45:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 10:45:21 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140324094521.D48EC1C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70236:f34f88635179 Date: 2014-03-24 08:32 +0100 http://bitbucket.org/pypy/pypy/changeset/f34f88635179/ Log: fix diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -629,7 +629,7 @@ oplist = [ "setfield_raw(i1, i2, descr=tydescr)", "setarrayitem_raw(i1, i2, i3, descr=tydescr)", - "setinteriorfield_raw(i1, i2, i3, descr=intzdescr)", + #"setinteriorfield_raw(i1, i2, i3, descr=intzdescr)", -- no such op "escape(i1)", # a generic unknown operation ] for op in oplist: From noreply at buildbot.pypy.org Mon Mar 24 10:45:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 10:45:23 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix Message-ID: <20140324094523.0C3181C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70237:f189e18e060d Date: 2014-03-24 08:34 +0100 http://bitbucket.org/pypy/pypy/changeset/f189e18e060d/ Log: fix diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -20,7 +20,7 @@ adr_jit_default_msg = ( CDefinedIntSymbolic('((long)"return from JITted function")')) adr__stm_become_inevitable = ( - CDefinedIntSymbolic('((long)&_stm_become_inevitable)') + CDefinedIntSymbolic('((long)&_stm_become_inevitable)')) def jit_stm_transaction_break_point(): From noreply at buildbot.pypy.org Mon Mar 24 10:45:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 10:45:24 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Replace calls to "stm_should_break_transaction" with a simple Message-ID: <20140324094524.55EDD1C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70238:e16ca2b6ffed Date: 2014-03-24 08:35 +0100 http://bitbucket.org/pypy/pypy/changeset/e16ca2b6ffed/ Log: Replace calls to "stm_should_break_transaction" with a simple jit resop now that it can be implemented easily by the backend. 
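In trace form the change is easiest to see side by side: what used to be emitted as a residual CALL, which the optimizer and backends had to recognize through the calldescr's OS_JIT_STM_SHOULD_BREAK_TRANSACTION oopspec index, becomes a dedicated resoperation they can dispatch on directly. Sketch reconstructed from the test diffs below (123 and sbtdescr are the placeholder callee and call descriptor used by those tests):

    # before: residual call, special-cased by its oopspec index
    i0 = call(123, descr=sbtdescr)
    guard_false(i0) []

    # after: first-class resop, handled like any other operation
    i0 = stm_should_break_transaction()
    guard_false(i0) []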
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1033,6 +1033,9 @@ def execute_cond_call_gc_wb_array(self, descr, a, b): py.test.skip("cond_call_gc_wb_array not supported") + def execute_stm_should_break_transaction(self, _): + return 0 + def execute_stm_transaction_break(self, _, really_wanted): pass diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -25,6 +25,9 @@ self.newops.append(op) return # ---------- transaction breaks ---------- + if opnum == rop.STM_SHOULD_BREAK_TRANSACTION: + self.newops.append(op) + return if opnum == rop.STM_TRANSACTION_BREAK: self.emitting_an_operation_that_can_collect() self.next_op_may_be_in_new_transaction() @@ -41,15 +44,6 @@ return # ---------- calls ---------- if op.is_call(): - if opnum == rop.CALL and op.getdescr(): - d = op.getdescr() - assert isinstance(d, CallDescr) - ei = d.get_extra_info() - if ei and (ei.oopspecindex == - EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION): - self.newops.append(op) - return - # self.next_op_may_be_in_new_transaction() # if opnum == rop.CALL_RELEASE_GIL: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2052,9 +2052,7 @@ assert isinstance(signloc, ImmedLoc) cb.ressign = signloc.value - if effectinfo and effectinfo.oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: - cb.emit_no_collect() - elif is_call_release_gil: + if is_call_release_gil: cb.emit_call_release_gil() else: cb.emit() diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -86,8 +86,6 @@ OS_JIT_FORCE_VIRTUAL = 120 OS_JIT_FORCE_VIRTUALIZABLE = 121 - OS_JIT_STM_SHOULD_BREAK_TRANSACTION = 130 - # for debugging: _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -56,13 +56,10 @@ assert len(self.cached_ops) == 1 assert self.cached_ops[0].getopnum() == rop.FORCE_TOKEN self.cached_ops.append(op) - - def optimize_CALL(self, op): + + def optimize_STM_SHOULD_BREAK_TRANSACTION(self, op): self.flush_cached() - effectinfo = op.getdescr().get_extra_info() - oopspecindex = effectinfo.oopspecindex - if oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: - self._set_break_wanted(False) + self._set_break_wanted(False) self.emit_operation(op) def optimize_STM_TRANSACTION_BREAK(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -8,22 +8,14 @@ class TestSTM(BaseTestWithUnroll, LLtypeMixin): stm = True - FUNC = lltype.FuncType([], lltype.Signed) - sbtdescr = LLtypeMixin.cpu.calldescrof( - FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo([], [], [], [], - EffectInfo.EF_CANNOT_RAISE, - oopspecindex=EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION, - can_invalidate=False) - ) namespace = 
LLtypeMixin.namespace.copy() namespace.update(locals()) - - + + def test_unrolled_loop(self): ops = """ [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ @@ -66,7 +58,7 @@ stm_transaction_break(0) guard_not_forced() [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() @@ -76,14 +68,14 @@ stm_transaction_break(0) guard_not_forced() [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ expected = """ [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ @@ -94,7 +86,7 @@ [p1] i1 = getfield_gc(p1, descr=adescr) - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p1) """ @@ -102,13 +94,13 @@ [p1] i1 = getfield_gc(p1, descr=adescr) - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p1) """ expected = """ [p1] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p1) @@ -124,7 +116,7 @@ guard_not_forced() [] stm_transaction_break(0) guard_not_forced() [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ @@ -133,13 +125,13 @@ stm_transaction_break(0) guard_not_forced() [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ expected = """ [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ @@ -158,7 +150,7 @@ guard_not_forced() [] stm_transaction_break(0) guard_not_forced() [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ @@ -173,7 +165,7 @@ stm_transaction_break(0) guard_not_forced() [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ @@ -185,7 +177,7 @@ stm_transaction_break(0) guard_not_forced() [] - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump() """ @@ -218,7 +210,7 @@ p6 = force_token() # not removed! - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p0) """ @@ -233,7 +225,7 @@ p6 = force_token() # not removed! - i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p0) """ @@ -243,7 +235,7 @@ p6 = force_token() # not removed! 
- i0 = call(123, descr=sbtdescr) + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p0) """ diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -206,13 +206,7 @@ mi = self.metainterp if val: resbox = history.BoxInt(0) - funcptr = mi.staticdata.stm_should_break_transaction - funcdescr = mi.staticdata.stm_should_break_transaction_descr - funcaddr = llmemory.cast_ptr_to_adr(funcptr) - mi._record_helper_nonpure_varargs( - rop.CALL, resbox, funcdescr, - [ConstInt(heaptracker.adr2int(funcaddr)),]) - # + mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) return resbox else: self._record_stm_transaction_break(False) @@ -1538,24 +1532,6 @@ d = self.exit_frame_with_exception_descr_ref self.cpu.exit_frame_with_exception_descr_ref = d - if self.config.translation.stm: - self.stm_should_break_transaction = rffi.llexternal( - 'stm_should_break_transaction', - [], lltype.Bool, - sandboxsafe=True, _nowrapper=True, transactionsafe=True, - _callable=lambda : False) - FUNC = lltype.typeOf(self.stm_should_break_transaction).TO - - ei = EffectInfo([], [], [], [], [], [], - EffectInfo.EF_CANNOT_RAISE, - oopspecindex=EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION, - can_invalidate=False) - - self.stm_should_break_transaction_descr = ( - self.cpu.calldescrof(FUNC, FUNC.ARGS, - FUNC.RESULT, ei)) - - def _freeze_(self): return True @@ -2499,7 +2475,7 @@ # it by ConstPtr(NULL). self.stop_tracking_virtualref(i) - def vable_after_residual_call(self, funcbox): + def vable_after_residual_call(self, funcbox=None): vinfo = self.jitdriver_sd.virtualizable_info if vinfo is not None: virtualizable_box = self.virtualizable_boxes[-1] @@ -2507,11 +2483,15 @@ if vinfo.tracing_after_residual_call(virtualizable): # the virtualizable escaped during CALL_MAY_FORCE. self.load_fields_from_virtualizable() - target_name = self.staticdata.get_name_from_address(funcbox.getaddr()) - if target_name: - target_name = "ConstClass(%s)" % target_name + if funcbox is None: + target_name = "?" 
else: - target_name = str(funcbox.getaddr()) + target_name = self.staticdata.get_name_from_address( + funcbox.getaddr()) + if target_name: + target_name = "ConstClass(%s)" % target_name + else: + target_name = str(funcbox.getaddr()) debug_print('vable escaped during a call in %s to %s' % ( self.framestack[-1].jitcode.name, target_name )) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -485,6 +485,7 @@ 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', + 'STM_SHOULD_BREAK_TRANSACTION/0', 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -14,7 +14,8 @@ return rstm.jit_stm_should_break_transaction(False) res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False - self.check_operations_history(stm_transaction_break=1) + self.check_operations_history(stm_transaction_break=1, + stm_should_break_transaction=0) def test_not_removed(self): import time @@ -23,14 +24,17 @@ return rstm.jit_stm_should_break_transaction(False) res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False - self.check_operations_history(stm_transaction_break=1, call_may_force=1) + self.check_operations_history(stm_transaction_break=1, + call_may_force=1, + stm_should_break_transaction=0) def test_not_removed2(self): def g(): return rstm.jit_stm_should_break_transaction(True) res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False - self.check_operations_history(call=1, stm_transaction_break=0) + self.check_operations_history(stm_transaction_break=0, + stm_should_break_transaction=1) def test_transaction_break(self): def g(): From noreply at buildbot.pypy.org Mon Mar 24 10:45:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 10:45:25 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: in-progress Message-ID: <20140324094525.971671C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70239:218296eca9e2 Date: 2014-03-24 10:44 +0100 http://bitbucket.org/pypy/pypy/changeset/218296eca9e2/ Log: in-progress diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -654,7 +654,8 @@ op.getopnum() == rop.STM_TRANSACTION_BREAK): assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True - if not op.is_comparison(): + if (not op.is_comparison() and + op.getopnum() != rop.STM_SHOULD_BREAK_TRANSACTION): if op.is_ovf(): if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and operations[i + 1].getopnum() != rop.GUARD_OVERFLOW): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1918,51 +1918,56 @@ def setup_failure_recovery(self): self.failure_recovery_code = [0, 0, 0, 0] - def _push_all_regs_to_frame(self, mc, ignored_regs, withfloats, - callee_only=False): - # Push all general purpose registers + def _push_pop_regs_to_frame(self, push, mc, grp_regs, 
xmm_regs): + # Push the general purpose registers base_ofs = self.cpu.get_baseofs_of_frame_field() + for gpr in grp_regs: + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + addr = (self.SEGMENT_FRAME, v * WORD + base_ofs) + if push: + mc.MOV_br(addr, gpr.value) + else: + mc.MOV_rb(gpr.value, addr) + # Push the XMM regs + if IS_X86_64: + coeff = 1 + else: + coeff = 2 + ofs = len(gpr_reg_mgr_cls.all_regs) + for xmm in xmm_regs: + addr = (self.SEGMENT_FRAME, + (ofs + xmm.value * coeff) * WORD + base_ofs) + if push: + mc.MOVSD_bx(addr, xmm.value) + else: + mc.MOVSD_xb(xmm.value, addr) + + def _do_with_registers(self, push, mc, + ignored_regs, withfloats, callee_only): if callee_only: regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - for gpr in regs: - if gpr not in ignored_regs: - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_br((self.SEGMENT_FRAME, v * WORD + base_ofs), gpr.value) + regs = [grp for gpr in regs if gpr not in ignored_regs] if withfloats: - if IS_X86_64: - coeff = 1 - else: - coeff = 2 - # Push all XMM regs - ofs = len(gpr_reg_mgr_cls.all_regs) - for i in range(len(xmm_reg_mgr_cls.all_regs)): - mc.MOVSD_bx((self.SEGMENT_FRAME, - (ofs + i * coeff) * WORD + base_ofs), i) + xmm_regs = xmm_reg_mgr_cls.all_regs + else: + xmm_regs = [] + self._push_pop_regs_from_frame(push, mc, regs, xmm_regs) + + def _push_all_regs_to_frame(self, mc, ignored_regs, withfloats, + callee_only=False): + # Push all general purpose registers (or only the ones that a + # callee might clobber); and if withfloats, push all XMM regs + self._do_with_registers(True, mc, + ignored_regs, withfloats, callee_only) def _pop_all_regs_from_frame(self, mc, ignored_regs, withfloats, callee_only=False): - # Pop all general purpose registers - base_ofs = self.cpu.get_baseofs_of_frame_field() - if callee_only: - regs = gpr_reg_mgr_cls.save_around_call_regs - else: - regs = gpr_reg_mgr_cls.all_regs - for gpr in regs: - if gpr not in ignored_regs: - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_rb(gpr.value, (self.SEGMENT_FRAME, v * WORD + base_ofs)) - if withfloats: - # Pop all XMM regs - if IS_X86_64: - coeff = 1 - else: - coeff = 2 - ofs = len(gpr_reg_mgr_cls.all_regs) - for i in range(len(xmm_reg_mgr_cls.all_regs)): - mc.MOVSD_xb(i, (self.SEGMENT_FRAME, - (ofs + i * coeff) * WORD + base_ofs)) + # Pop all general purpose registers (or only the ones that a + # callee might clobber); and if withfloats, pop all XMM regs + self._do_with_registers(False, mc, + ignored_regs, withfloats, callee_only) def _build_failure_recovery(self, exc, withfloats=False): mc = codebuf.MachineCodeBlockWrapper() @@ -2510,6 +2515,29 @@ assert isinstance(reg, RegLoc) self.mc.MOV_rr(reg.value, ebp.value) + def _generate_cmp_break_transaction(self): + # emits the check with a CMP instruction: + # pypy_stm_nursery_low_fill_mark < STM_SEGMENT->nursery_current + # so if it is followed with a JB, it will follow the jump if + # we should break the transaction now. 
+ # + psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark + self.mc.MOV(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) + nf_adr = rstm.nursery_free_adr + assert rx86.fits_in_32bits(nf_adr) # because it is in the 2nd page + self.mc.CMP_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) + + def genop_guard_stm_should_break_transaction(self, op, guard_op, + guard_token, arglocs, + result_loc): + if not IS_X86_64: + todo() # "needed for X86_64_SCRATCH_REG" + self._generate_cmp_break_transaction() + if guard_op.getopnum() == rop.GUARD_FALSE: + self.implement_guard(guard_token, 'B') # JB goes to "yes, break" + else: + self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't" + def genop_guard_stm_transaction_break(self, op, guard_op, guard_token, arglocs, result_loc): assert self.cpu.gc_ll_descr.stm @@ -2520,62 +2548,61 @@ self._store_force_index(guard_op) mc = self.mc - # if stm_should_break_transaction() + self._generate_cmp_break_transaction() + # use JAE to jump over the following piece of code if we don't need + # to break the transaction now + mc.J_il(rx86.Conditions['AE'], 0xfffff) # patched later + jae_location = mc.get_relative_pos() + # This is the case in which we have to do the same as the logic + # in pypy_stm_perform_transaction(). We know that we're not in + # an atomic transaction (otherwise the jump above always triggers). + # So we only have to do the following three operations: + # stm_commit_transaction(); + # __builtin_setjmp(jmpbuf); + # pypy_stm_start_transaction(&jmpbuf); - # XXX UD2 - #fn = stmtlocal.stm_should_break_transaction_fn - #mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) - mc.MOV(eax, imm(0)) - - - mc.TEST8(eax.lowest8bits(), eax.lowest8bits()) - mc.J_il(rx86.Conditions['Z'], 0xfffff) # patched later - jz_location2 = mc.get_relative_pos() + # save all registers and the gcmap + self.push_gcmap(mc, gcmap, mov=True) + grp_regs = self._regalloc.rm.reg_bindings.values() + xmm_regs = self._regalloc.xrm.reg_bindings.values() + self._push_pop_regs_from_frame(True, mc, grp_regs, xmm_regs) # - # call stm_transaction_break() with the address of the - # STM_RESUME_BUF and the custom longjmp function - self.push_gcmap(mc, gcmap, mov=True) + # call stm_commit_transaction() + mc.CALL(imm(rstm.adr_stm_commit_transaction)) # - # save all registers - base_ofs = self.cpu.get_baseofs_of_frame_field() - for gpr in self._regalloc.rm.reg_bindings.values(): - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_br((self.SEGMENT_FRAME, v * WORD + base_ofs), gpr.value) - if IS_X86_64: - coeff = 1 - else: - coeff = 2 - ofs = len(gpr_reg_mgr_cls.all_regs) - for xr in self._regalloc.xrm.reg_bindings.values(): - mc.MOVSD_bx((self.SEGMENT_FRAME, - (ofs + xr.value * coeff) * WORD + base_ofs), xr.value) + # update the two words in the STM_RESUME_BUF, as described + # in arch.py. 
The "learip" pseudo-instruction turns into + # what is, in gnu as syntax: lea 0(%rip), %rax (the 0 is + # one byte, patched just below) + mc.LEARIP_rl8(eax, 0) + learip_location = mc.get_relative_pos() + mc.MOV_sr(STM_JMPBUF_OFS_RIP, eax) + mc.MOV_sr(STM_JMPBUF_OFS_RSP, esp) # - # CALL break function - fn = self.stm_transaction_break_path - mc.CALL(imm(fn)) + offset = mc.get_relative_pos() - learip_location + assert 0 < offset <= 127 + mc.overwrite(learip_location - 1, chr(offset)) # ** HERE ** is the place an aborted transaction retries - # ebp/frame reloaded by longjmp callback + # (when resuming, ebp is garbage, but the STM_RESUME_BUF is + # still correct in case of repeated aborting) + # + # call pypy_stm_start_transaction(&jmpbuf) + mc.LEA_rs(edi, STM_JMPBUF_OFS) + mc.CALL(imm(rstm.adr_pypy_stm_start_transaction)) + # + # reload ebp (the frame) now + self._reload_frame_if_necessary(self.mc) # # restore regs - base_ofs = self.cpu.get_baseofs_of_frame_field() - for gpr in self._regalloc.rm.reg_bindings.values(): - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_rb(gpr.value, (self.SEGMENT_FRAME, v * WORD + base_ofs)) - if IS_X86_64: - coeff = 1 - else: - coeff = 2 - ofs = len(gpr_reg_mgr_cls.all_regs) - for xr in self._regalloc.xrm.reg_bindings.values(): - mc.MOVSD_xb(xr.value, (self.SEGMENT_FRAME, - (ofs + xr.value * coeff) * WORD + base_ofs)) + self._push_pop_regs_from_frame(False, mc, grp_regs, xmm_regs) # - # patch the JZ above - offset = mc.get_relative_pos() - jz_location2 - mc.overwrite32(jz_location2-4, offset) + self._emit_guard_not_forced(guard_token) - self._emit_guard_not_forced(guard_token) + # patch the JAE above (note that we also skip the guard_not_forced + # in the common situation where we jump over the code above) + offset = mc.get_relative_pos() - jae_location + mc.overwrite32(jae_location-4, offset) def genop_discard_stm_read(self, op, arglocs): if not IS_X86_64: diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1235,14 +1235,13 @@ self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc) + def consider_stm_should_break_transaction(self, op, guard_op): + if guard_op is None: + self.not_implemented_op(op) + self.perform_with_guard(op, guard_op, [], None) + def consider_stm_transaction_break(self, op, guard_op): - # - # only save regs for the should_break_transaction call - self.xrm.before_call() - self.rm.before_call() - # self.perform_with_guard(op, guard_op, [], None) - def consider_jump(self, op): assembler = self.assembler @@ -1393,7 +1392,8 @@ or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER or num == rop.CALL_RELEASE_GIL - or num == rop.STM_TRANSACTION_BREAK): + or num == rop.STM_TRANSACTION_BREAK + or num == rop.STM_SHOULD_BREAK_TRANSACTION): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) else: diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -696,6 +696,8 @@ self.writechar(chr((imm >> 56) & 0xFF)) CQO = insn(rex_w, '\x99') + LEARIP_rl8 = insn(rex_w, '\x8D', register(1, 8), chr(0x05), + immediate(2, 'b')) # Three different encodings... following what gcc does. From the # shortest encoding to the longest one. 
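For context: the CMP sequence emitted by _generate_cmp_break_transaction() above and the pypy_stm_should_break_transaction() helper declared in stmgcintf.h further down test the same condition. A minimal stand-alone model of that condition, with the two values passed in as plain integers purely for illustration; the real code reads STM_SEGMENT->nursery_current and the thread-local pypy_stm_nursery_low_fill_mark directly:

    def should_break_transaction(nursery_current, low_fill_mark):
        # True once the nursery allocation pointer has moved past the
        # precomputed low fill mark; the mark is set to 0 for inevitable
        # transactions, which makes the answer True right away.
        return nursery_current > low_fill_mark

In the generated machine code the same test becomes a CMP followed by JB, with JB taking the "yes, break the transaction" path.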
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -11,6 +11,8 @@ stm_nb_segments = CDefinedIntSymbolic('STM_NB_SEGMENTS') adr_nursery_free = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_current)') adr_nursery_top = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_end)') +adr_pypy_stm_nursery_low_fill_mark = ( + CDefinedIntSymbolic('((long)&pypy_stm_nursery_low_fill_mark')) adr_transaction_read_version = ( CDefinedIntSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) adr_jmpbuf_ptr = ( @@ -21,6 +23,10 @@ CDefinedIntSymbolic('((long)"return from JITted function")')) adr__stm_become_inevitable = ( CDefinedIntSymbolic('((long)&_stm_become_inevitable)')) +adr_stm_commit_transaction = ( + CDefinedIntSymbolic('((long)&stm_commit_transaction)')) +adr_pypy_stm_start_transaction = ( + CDefinedIntSymbolic('((long)&pypy_stm_start_transaction)')) def jit_stm_transaction_break_point(): diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -90,12 +90,38 @@ } } +void pypy_stm_start_transaction(stm_jmpbuf_t *jmpbuf_ptr, + volatile long *v_counter) +{ + _stm_start_transaction(&stm_thread_local, jmpbuf_ptr); + + /* If v_counter==0, initialize 'pypy_stm_nursery_low_fill_mark' + from the configured length limit. If v_counter>0, we did an + abort, and we now configure 'pypy_stm_nursery_low_fill_mark' + to a value slightly smaller than the value at last abort. + */ + long counter, limit; + counter = *v_counter; + *v_counter = counter + 1; + + if (counter == 0) { + limit = pypy_transaction_length; + } + else { + limit = stm_thread_local.last_abort__bytes_in_nursery; + limit -= (limit >> 4); + } + pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit; + pypy_stm_ready_atomic = 1; /* reset after abort */ +} + void pypy_stm_perform_transaction(object_t *arg, int callback(object_t *, int)) { /* must save roots around this call */ stm_jmpbuf_t jmpbuf; long volatile v_counter = 0; + int (*volatile v_callback)(object_t *, int) = callback; #ifndef NDEBUG - struct stm_shadowentry_s *volatile old_shadowstack = + struct stm_shadowentry_s *volatile v_old_shadowstack = stm_thread_local.shadowstack; #endif @@ -105,42 +131,28 @@ while (1) { if (pypy_stm_ready_atomic == 1) { + /* Not in an atomic transaction + */ stm_commit_transaction(); - STM_START_TRANSACTION(&stm_thread_local, jmpbuf); - pypy_stm_ready_atomic = 1; /* reset after abort */ - } - /* After setjmp(), the local variables v_* are preserved because they - * are volatile. The other variables are only declared here. */ - long counter, result; - counter = v_counter; - v_counter = counter + 1; - - /* If counter==0, initialize 'pypy_stm_nursery_low_fill_mark' - from the configured length limit. If counter>0, we did an - abort, and we can now configure 'pypy_stm_nursery_low_fill_mark' - to a value slightly smaller than the value at last abort. - */ - if (stm_is_inevitable()) { - pypy_stm_nursery_low_fill_mark = 0; + /* After setjmp(), the local variables v_* are preserved because + they are volatile. The other local variables should be + declared below than this point only. 
+ */ + while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } + pypy_stm_start_transaction(&jmpbuf, &v_counter); } else { - long limit; - if (counter == 0) { - limit = pypy_transaction_length; - } - else { - limit = stm_thread_local.last_abort__bytes_in_nursery; - limit -= (limit >> 4); - } - pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit; + /* In an atomic transaction */ + assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); } /* invoke the callback in the new transaction */ STM_POP_ROOT(stm_thread_local, arg); - assert(old_shadowstack == stm_thread_local.shadowstack); + assert(v_old_shadowstack == stm_thread_local.shadowstack); STM_PUSH_ROOT(stm_thread_local, arg); - result = callback(arg, counter); + + long result = v_callback(arg, counter); if (result <= 0) break; v_counter = 0; @@ -157,11 +169,12 @@ } else { _stm_become_inevitable("perform_transaction left with atomic"); + assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); } } //gcptr x = stm_pop_root(); /* pop the END_MARKER */ //assert(x == END_MARKER_OFF || x == END_MARKER_ON); STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */ - assert(old_shadowstack == stm_thread_local.shadowstack); + assert(v_old_shadowstack == stm_thread_local.shadowstack); } diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -11,6 +11,7 @@ extern __thread struct stm_thread_local_s stm_thread_local; extern __thread long pypy_stm_ready_atomic; extern __thread uintptr_t pypy_stm_nursery_low_fill_mark; +extern __thread uintptr_t pypy_stm_nursery_low_fill_mark_saved; void pypy_stm_setup(void); void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ @@ -35,11 +36,26 @@ } } static inline void pypy_stm_increment_atomic(void) { - pypy_stm_ready_atomic++; + switch (++pypy_stm_ready_atomic) { + case 2: + pypy_stm_nursery_low_fill_mark_saved = pypy_stm_nursery_low_fill_mark; + pypy_stm_nursery_low_fill_mark = (uintptr_t) -1; + break; + default: + break; + } } static inline void pypy_stm_decrement_atomic(void) { - if (--pypy_stm_ready_atomic == 0) + switch (--pypy_stm_ready_atomic) { + case 1: + pypy_stm_nursery_low_fill_mark = pypy_stm_nursery_low_fill_mark_saved; + break; + case 0: pypy_stm_ready_atomic = 1; + break; + default: + break; + } } static inline long pypy_stm_get_atomic(void) { return pypy_stm_ready_atomic - 1; @@ -48,15 +64,19 @@ void pypy_stm_leave_callback_call(long); void pypy_stm_set_transaction_length(double); void pypy_stm_perform_transaction(object_t *, int(object_t *, int)); +void pypy_stm_start_transaction(stm_jmpbuf_t *, volatile long *); static inline int pypy_stm_should_break_transaction(void) { /* we should break the current transaction if we have used more than some initial portion of the nursery, or if we are running inevitable - (in which case pypy_stm_nursery_low_fill_mark is set to 0) + (in which case pypy_stm_nursery_low_fill_mark is set to 0). + If the transaction is atomic, pypy_stm_nursery_low_fill_mark is + instead set to (uintptr_t) -1, and the following check is never true. */ uintptr_t current = (uintptr_t)STM_SEGMENT->nursery_current; return current > pypy_stm_nursery_low_fill_mark; + /* NB. 
this logic is hard-coded in jit/backend/x86/assembler.py too */ } From noreply at buildbot.pypy.org Mon Mar 24 10:48:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 10:48:09 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: kill Message-ID: <20140324094809.1187A1C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70240:7ad2cf207101 Date: 2014-03-24 10:47 +0100 http://bitbucket.org/pypy/pypy/changeset/7ad2cf207101/ Log: kill diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -199,58 +199,6 @@ mc.RET() return mc.materialize(self.cpu.asmmemmgr, []) - def _build_stm_transaction_break_path(self): - assert self.cpu.gc_ll_descr.stm - if not we_are_translated(): - return # tests only - - """ While arriving on slowpath, we have a gcpattern on stack 0. - This function does not have to preserve registers. It expects - all registers to be saved in the caller. - """ - mc = codebuf.MachineCodeBlockWrapper() - # store the gc pattern - ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.MOV_rs(ecx.value, WORD) - mc.MOV_br((self.SEGMENT_FRAME, ofs), ecx.value) - # - # align on 16b boundary (there is a retaddr on the stack) - mc.SUB_ri(esp.value, 16 - WORD) - # - # call stm_transaction_break() with the address of the - # STM_RESUME_BUF and the custom longjmp function - # (rsp + FRAME_FIXED_SIZE + RET_ADDR + ALIGNMENT) - mc.LEA_rs(edi.value, FRAME_FIXED_SIZE * WORD + WORD + (16-WORD)) - mc.MOV(esi, imm(self.stm_longjmp_callback_addr)) - - # XXX UD2 - #fn = stmtlocal.stm_transaction_break_fn - #mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) - - # - self._reload_frame_if_necessary(mc) - # - mc.ADD_ri(esp.value, 16 - WORD) - # clear the gc pattern - mc.MOV_bi((self.SEGMENT_FRAME, ofs), 0) - # - # Fill the stm resume buffer. Don't do it before the call! - # The previous transaction may still be aborted during the call - # above, so we need the old content of the buffer! - # The buffer contains the address of the resume point which - # is the RET_ADDR of this called piece of code. This will be - # put at offset 0 of the buffer, at offset WORD, there is a - # copy of the current shadowstack pointer. - mc.POP_r(eax.value) # get ret addr - self._load_shadowstack_top_in_ebx(mc, self.cpu.gc_ll_descr.gcrootmap) - mc.MOV_sr((FRAME_FIXED_SIZE + 1) * WORD, ebx.value) - mc.MOV_sr((FRAME_FIXED_SIZE + 0) * WORD, eax.value) - mc.JMP_r(eax.value) - # - rawstart = mc.materialize(self.cpu.asmmemmgr, []) - return rawstart - - def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. The arguments are passed in eax and edi, as follows: @@ -847,8 +795,9 @@ def _call_footer(self): gcrootmap = self.cpu.gc_ll_descr.gcrootmap if self.cpu.gc_ll_descr.stm and we_are_translated(): - # call stm_invalidate_jmp_buf(), in case we called - # stm_transaction_break() earlier + # call _stm_become_inevitable() if the current jmpbuf is set + # to this frame, because we're about to leave. This is if + # we called a pypy_stm_start_transaction() earlier. 
assert IS_X86_64 # # load the shadowstack pointer into ebx, and decrement it, From noreply at buildbot.pypy.org Mon Mar 24 11:01:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 11:01:28 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: typos Message-ID: <20140324100128.392811C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70241:f54a6241180a Date: 2014-03-24 11:00 +0100 http://bitbucket.org/pypy/pypy/changeset/f54a6241180a/ Log: typos diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -85,10 +85,6 @@ self._build_wb_slowpath(False) self._build_wb_slowpath(True) self._build_wb_slowpath(False, for_frame=True) - # only for stm: - if gc_ll_descr.stm: - self._build_stm_longjmp_callback() - self.stm_transaction_break_path = self._build_stm_transaction_break_path() # only one of those self.build_frame_realloc_slowpath() if self.cpu.supports_floats: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -18,7 +18,8 @@ from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) from rpython.jit.backend.x86.arch import ( FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, - PASS_ON_MY_FRAME, STM_FRAME_FIXED_SIZE) + PASS_ON_MY_FRAME, STM_FRAME_FIXED_SIZE, STM_JMPBUF_OFS, + STM_JMPBUF_OFS_RIP, STM_JMPBUF_OFS_RSP) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -1897,12 +1898,12 @@ regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - regs = [grp for gpr in regs if gpr not in ignored_regs] + regs = [gpr for gpr in regs if gpr not in ignored_regs] if withfloats: xmm_regs = xmm_reg_mgr_cls.all_regs else: xmm_regs = [] - self._push_pop_regs_from_frame(push, mc, regs, xmm_regs) + self._push_pop_regs_to_frame(push, mc, regs, xmm_regs) def _push_all_regs_to_frame(self, mc, ignored_regs, withfloats, callee_only=False): @@ -2472,7 +2473,7 @@ # psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark self.mc.MOV(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) - nf_adr = rstm.nursery_free_adr + nf_adr = rstm.adr_nursery_free assert rx86.fits_in_32bits(nf_adr) # because it is in the 2nd page self.mc.CMP_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) @@ -2515,7 +2516,7 @@ self.push_gcmap(mc, gcmap, mov=True) grp_regs = self._regalloc.rm.reg_bindings.values() xmm_regs = self._regalloc.xrm.reg_bindings.values() - self._push_pop_regs_from_frame(True, mc, grp_regs, xmm_regs) + self._push_pop_regs_to_frame(True, mc, grp_regs, xmm_regs) # # call stm_commit_transaction() mc.CALL(imm(rstm.adr_stm_commit_transaction)) @@ -2524,10 +2525,10 @@ # in arch.py. 
The "learip" pseudo-instruction turns into # what is, in gnu as syntax: lea 0(%rip), %rax (the 0 is # one byte, patched just below) - mc.LEARIP_rl8(eax, 0) + mc.LEARIP_rl8(eax.value, 0) learip_location = mc.get_relative_pos() - mc.MOV_sr(STM_JMPBUF_OFS_RIP, eax) - mc.MOV_sr(STM_JMPBUF_OFS_RSP, esp) + mc.MOV_sr(STM_JMPBUF_OFS_RIP, eax.value) + mc.MOV_sr(STM_JMPBUF_OFS_RSP, esp.value) # offset = mc.get_relative_pos() - learip_location assert 0 < offset <= 127 @@ -2537,14 +2538,14 @@ # still correct in case of repeated aborting) # # call pypy_stm_start_transaction(&jmpbuf) - mc.LEA_rs(edi, STM_JMPBUF_OFS) + mc.LEA_rs(edi.value, STM_JMPBUF_OFS) mc.CALL(imm(rstm.adr_pypy_stm_start_transaction)) # # reload ebp (the frame) now self._reload_frame_if_necessary(self.mc) # # restore regs - self._push_pop_regs_from_frame(False, mc, grp_regs, xmm_regs) + self._push_pop_regs_to_frame(False, mc, grp_regs, xmm_regs) # self._emit_guard_not_forced(guard_token) From noreply at buildbot.pypy.org Mon Mar 24 11:18:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 11:18:45 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: more typos Message-ID: <20140324101845.098F21C1018@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70242:e2ff7181a058 Date: 2014-03-24 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/e2ff7181a058/ Log: more typos diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -12,7 +12,7 @@ adr_nursery_free = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_current)') adr_nursery_top = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_pypy_stm_nursery_low_fill_mark = ( - CDefinedIntSymbolic('((long)&pypy_stm_nursery_low_fill_mark')) + CDefinedIntSymbolic('((long)&pypy_stm_nursery_low_fill_mark)')) adr_transaction_read_version = ( CDefinedIntSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) adr_jmpbuf_ptr = ( @@ -20,7 +20,7 @@ adr_write_slowpath = CDefinedIntSymbolic('((long)&_stm_write_slowpath)') adr_jit_default_msg = ( - CDefinedIntSymbolic('((long)"return from JITted function")')) + CDefinedIntSymbolic('((long)(char *)"return from JITted function")')) adr__stm_become_inevitable = ( CDefinedIntSymbolic('((long)&_stm_become_inevitable)')) adr_stm_commit_transaction = ( diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -11,6 +11,10 @@ #define OP_STACK_CURRENT(r) r = (Signed)&r +#define OP_JIT_STM_TRANSACTION_BREAK_POINT(r) /* nothing */ +#define OP_JIT_STM_SHOULD_BREAK_TRANSACTION(x, r) r = 0 + + #ifdef RPY_STM void _pypy_stm_free(void *); #define _OP_RAW_MALLOCED(r) stm_call_on_abort(&stm_thread_local, r, \ diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -222,13 +222,6 @@ ## stm_clear_on_abort(&pypy_g_ExcData, sizeof(pypy_g_ExcData)); ## ''' -##def jit_stm_transaction_break_point(funcgen, op): -## return '/* jit_stm_transaction_break_point */' - -##def jit_stm_should_break_transaction(funcgen, op): -## result = funcgen.expr(op.result) -## return '%s = 0; /* jit_stm_should_break_transaction */' % (result, ) - ##def stm_finalize(funcgen, op): ## return 'stm_finalize();' diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ 
b/rpython/translator/stm/src_stm/stmgcintf.c @@ -152,7 +152,7 @@ assert(v_old_shadowstack == stm_thread_local.shadowstack); STM_PUSH_ROOT(stm_thread_local, arg); - long result = v_callback(arg, counter); + long result = v_callback(arg, v_counter); if (result <= 0) break; v_counter = 0; From noreply at buildbot.pypy.org Mon Mar 24 11:24:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 11:24:31 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: bah Message-ID: <20140324102431.ABEC01C054C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70243:4546d131d000 Date: 2014-03-24 11:23 +0100 http://bitbucket.org/pypy/pypy/changeset/4546d131d000/ Log: bah diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -813,7 +813,7 @@ # compare it with the currently-stored jmpbuf mc.CMP_rj(edi.value, (self.SEGMENT_GC, rstm.adr_jmpbuf_ptr)) # if they differ (or if jmpbuf_ptr is already NULL), nothing to do - mc.J_il(rx86.Conditions['NE'], 0) # patched later + mc.J_il8(rx86.Conditions['NE'], 0) # patched later jne_location = mc.get_relative_pos() # # if they are equal, we need to become inevitable now From noreply at buildbot.pypy.org Mon Mar 24 12:23:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 12:23:45 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixes in _cmp_guard_class: use %gs-based addressing (these CMP are Message-ID: <20140324112345.2E2DB1C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70244:2b68cb5534ee Date: 2014-03-24 12:19 +0100 http://bitbucket.org/pypy/pypy/changeset/2b68cb5534ee/ Log: Fixes in _cmp_guard_class: use %gs-based addressing (these CMP are reading memory from inside the object) and use a non-zero offset to get the TID. diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1766,9 +1766,11 @@ self.implement_guard(guard_token, 'NE') def _cmp_guard_class(self, locs): + inst_loc = locs[0] + assert isinstance(inst_loc, RegLoc) offset = self.cpu.vtable_offset if offset is not None: - self.mc.CMP(mem(self.SEGMENT_NO, locs[0], offset), locs[1]) + self.mc.CMP(mem(self.SEGMENT_GC, inst_loc, offset), locs[1]) else: # XXX hard-coded assumption: to go from an object to its class # we use the following algorithm: @@ -1787,6 +1789,12 @@ # from reading the half-word in the object header. Note that # this half-word is at offset 0 on a little-endian machine; # it would be at offset 2 or 4 on a big-endian machine. + # It is at offset 4 with stm. 
+ if self.cpu.gc_ll_descr.stm: + offset = rstm.tid_offset + else: + offset = 0 + # from rpython.memory.gctypelayout import GCData sizeof_ti = rffi.sizeof(GCData.TYPE_INFO) type_info_group = llop.gc_get_type_info_group(llmemory.Address) @@ -1794,10 +1802,10 @@ expected_typeid = classptr - sizeof_ti - type_info_group if IS_X86_32: expected_typeid >>= 2 - self.mc.CMP16(mem(self.SEGMENT_NO, locs[0], 0), + self.mc.CMP16(mem(self.SEGMENT_GC, inst_loc, offset), ImmedLoc(expected_typeid)) elif IS_X86_64: - self.mc.CMP32_mi((self.SEGMENT_NO, locs[0].value, 0), + self.mc.CMP32_mi((self.SEGMENT_GC, inst_loc.value, offset), expected_typeid) def genop_guard_guard_class(self, ign_1, guard_op, guard_token, locs, ign_2): From noreply at buildbot.pypy.org Mon Mar 24 12:36:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 12:36:28 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140324113628.91A611C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70245:3a9be956c290 Date: 2014-03-24 12:35 +0100 http://bitbucket.org/pypy/pypy/changeset/3a9be956c290/ Log: fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2521,7 +2521,7 @@ # pypy_stm_start_transaction(&jmpbuf); # save all registers and the gcmap - self.push_gcmap(mc, gcmap, mov=True) + self.push_gcmap(mc, gcmap, store=True) grp_regs = self._regalloc.rm.reg_bindings.values() xmm_regs = self._regalloc.xrm.reg_bindings.values() self._push_pop_regs_to_frame(True, mc, grp_regs, xmm_regs) @@ -2532,15 +2532,15 @@ # update the two words in the STM_RESUME_BUF, as described # in arch.py. The "learip" pseudo-instruction turns into # what is, in gnu as syntax: lea 0(%rip), %rax (the 0 is - # one byte, patched just below) - mc.LEARIP_rl8(eax.value, 0) + # four bytes, patched just below) + mc.LEARIP_rl32(eax.value, 0) learip_location = mc.get_relative_pos() mc.MOV_sr(STM_JMPBUF_OFS_RIP, eax.value) mc.MOV_sr(STM_JMPBUF_OFS_RSP, esp.value) # offset = mc.get_relative_pos() - learip_location assert 0 < offset <= 127 - mc.overwrite(learip_location - 1, chr(offset)) + mc.overwrite32(learip_location - 4, offset) # ** HERE ** is the place an aborted transaction retries # (when resuming, ebp is garbage, but the STM_RESUME_BUF is # still correct in case of repeated aborting) @@ -2560,7 +2560,7 @@ # patch the JAE above (note that we also skip the guard_not_forced # in the common situation where we jump over the code above) offset = mc.get_relative_pos() - jae_location - mc.overwrite32(jae_location-4, offset) + mc.overwrite32(jae_location - 4, offset) def genop_discard_stm_read(self, op, arglocs): if not IS_X86_64: diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -696,8 +696,7 @@ self.writechar(chr((imm >> 56) & 0xFF)) CQO = insn(rex_w, '\x99') - LEARIP_rl8 = insn(rex_w, '\x8D', register(1, 8), chr(0x05), - immediate(2, 'b')) + LEARIP_rl32 = insn(rex_w, '\x8D', register(1, 8), chr(0x05), immediate(2)) # Three different encodings... following what gcc does. From the # shortest encoding to the longest one. 
diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -22,6 +22,7 @@ 'stm_threadlocalref_count', 'stm_threadlocalref_addr', 'jit_assembler_call', 'gc_writebarrier', 'shrink_array', 'jit_stm_transaction_break_point', + 'jit_stm_should_break_transaction', ]) ALWAYS_ALLOW_OPERATIONS |= set(lloperation.enum_tryfold_ops()) From noreply at buildbot.pypy.org Mon Mar 24 13:06:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 13:06:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Obscure annotation hack Message-ID: <20140324120651.0FA051C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70246:257b3b8fc7a2 Date: 2014-03-24 12:47 +0100 http://bitbucket.org/pypy/pypy/changeset/257b3b8fc7a2/ Log: Obscure annotation hack diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -55,7 +55,7 @@ ''' if space.config.translation.stm: from rpython.rlib.rstm import stm_nb_segments - return space.wrap(stm_nb_segments) + return space.wrap(stm_nb_segments + 0) # :-( annotation hack else: return space.wrap(1) From noreply at buildbot.pypy.org Mon Mar 24 13:06:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 13:06:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix this test Message-ID: <20140324120652.4AA531C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70247:08b0b669fc73 Date: 2014-03-24 12:47 +0100 http://bitbucket.org/pypy/pypy/changeset/08b0b669fc73/ Log: fix this test diff --git a/rpython/jit/backend/x86/test/test_jump.py b/rpython/jit/backend/x86/test/test_jump.py --- a/rpython/jit/backend/x86/test/test_jump.py +++ b/rpython/jit/backend/x86/test/test_jump.py @@ -3,9 +3,10 @@ from rpython.jit.backend.x86.regalloc import X86FrameManager from rpython.jit.backend.x86.jump import remap_frame_layout from rpython.jit.backend.x86.jump import remap_frame_layout_mixed +from rpython.jit.backend.x86.rx86 import SEGMENT_NO from rpython.jit.metainterp.history import INT -fm = X86FrameManager(0) +fm = X86FrameManager(0, SEGMENT_NO) frame_pos = fm.frame_pos class MockAssembler: @@ -406,8 +407,8 @@ def regalloc_immedmem2mem(self, x, y): print "?????????????????????????" def main(): - srclocs = [FrameLoc(9999, x, 'i') for x,y in CASE] - dstlocs = [FrameLoc(9999, y, 'i') for x,y in CASE] + srclocs = [FrameLoc(SEGMENT_NO, 9999, x, 'i') for x,y in CASE] + dstlocs = [FrameLoc(SEGMENT_NO, 9999, y, 'i') for x,y in CASE] remap_frame_layout(FakeAssembler(), srclocs, dstlocs, eax) # it works when run directly main() From noreply at buildbot.pypy.org Mon Mar 24 13:06:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 13:06:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fixes fixes Message-ID: <20140324120653.8249A1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70248:9ddbe2af8e8a Date: 2014-03-24 13:03 +0100 http://bitbucket.org/pypy/pypy/changeset/9ddbe2af8e8a/ Log: fixes fixes diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -45,15 +45,19 @@ assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 -# The STM resume buffer (on x86-64) is two words wide. 
Actually, clang + +# The STM resume buffer (on x86-64) is four words wide. Actually, clang # uses three words (see test_stm.py): rbp, rip, rsp. But the value of # rbp is not interesting for the JIT-generated machine code. So the # STM_JMPBUF_OFS is the offset from the stack top to the start of the # buffer, with only words at offset +1 and +2 in this buffer being -# meaningful -- these are the two words overlapping the STM resume -# buffer's location in the diagram above. -STM_RESUME_BUF_WORDS = 16 / WORD +# meaningful. We use ebp, i.e. the word at offset +0, to store the +# resume counter. + +STM_RESUME_BUF_WORDS = 4 # <-- for alignment, it can't be 3 STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_RESUME_BUF_WORDS -STM_JMPBUF_OFS = WORD * (FRAME_FIXED_SIZE - 1) +STM_JMPBUF_OFS = WORD * FRAME_FIXED_SIZE +STM_JMPBUF_OFS_RBP = STM_JMPBUF_OFS + 0 * WORD STM_JMPBUF_OFS_RIP = STM_JMPBUF_OFS + 1 * WORD STM_JMPBUF_OFS_RSP = STM_JMPBUF_OFS + 2 * WORD +# unused: STM_JMPBUF_OFS + 3 * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -19,7 +19,7 @@ from rpython.jit.backend.x86.arch import ( FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, PASS_ON_MY_FRAME, STM_FRAME_FIXED_SIZE, STM_JMPBUF_OFS, - STM_JMPBUF_OFS_RIP, STM_JMPBUF_OFS_RSP) + STM_JMPBUF_OFS_RIP, STM_JMPBUF_OFS_RSP, STM_JMPBUF_OFS_RBP) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -2537,6 +2537,8 @@ learip_location = mc.get_relative_pos() mc.MOV_sr(STM_JMPBUF_OFS_RIP, eax.value) mc.MOV_sr(STM_JMPBUF_OFS_RSP, esp.value) + mc.XOR(ebp, ebp) + mc.MOV_sr(STM_JMPBUF_OFS_RBP, ebp.value) # offset = mc.get_relative_pos() - learip_location assert 0 < offset <= 127 @@ -2545,11 +2547,15 @@ # (when resuming, ebp is garbage, but the STM_RESUME_BUF is # still correct in case of repeated aborting) # - # call pypy_stm_start_transaction(&jmpbuf) + # call pypy_stm_start_transaction(&jmpbuf, &v_counter) + # where v_counter is abusively stored in the jmpbuf at + # the location for ebp (so that the value in v_counter + # is here found in ebp, if we needed it). mc.LEA_rs(edi.value, STM_JMPBUF_OFS) + mc.LEA_rs(esi.value, STM_JMPBUF_OFS_RBP) mc.CALL(imm(rstm.adr_pypy_stm_start_transaction)) # - # reload ebp (the frame) now + # reload ebp with the frame now self._reload_frame_if_necessary(self.mc) # # restore regs diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -206,6 +206,7 @@ # ____________________________________________________________ class Symbolic(object): + _compare_by_id_ = False def annotation(self): return None @@ -216,11 +217,15 @@ def __cmp__(self, other): if self is other: return 0 + elif self._compare_by_id_ or getattr(other, '_compare_by_id_', False): + return cmp(id(self), id(other)) else: raise TypeError("Symbolics cannot be compared! (%r, %r)" % (self, other)) def __hash__(self): + if self._compare_by_id_: + return object.__hash__(self) raise TypeError("Symbolics are not hashable! 
%r" % (self,)) def __nonzero__(self): diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -5,28 +5,31 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rlib.jit import dont_look_inside +class CFlexSymbolic(CDefinedIntSymbolic): + _compare_by_id_ = True + TID = rffi.UINT -tid_offset = CDefinedIntSymbolic('offsetof(struct rpyobj_s, tid)') -stm_nb_segments = CDefinedIntSymbolic('STM_NB_SEGMENTS') -adr_nursery_free = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_current)') -adr_nursery_top = CDefinedIntSymbolic('((long)&STM_SEGMENT->nursery_end)') +tid_offset = CFlexSymbolic('offsetof(struct rpyobj_s, tid)') +stm_nb_segments = CFlexSymbolic('STM_NB_SEGMENTS') +adr_nursery_free = CFlexSymbolic('((long)&STM_SEGMENT->nursery_current)') +adr_nursery_top = CFlexSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_pypy_stm_nursery_low_fill_mark = ( - CDefinedIntSymbolic('((long)&pypy_stm_nursery_low_fill_mark)')) + CFlexSymbolic('((long)&pypy_stm_nursery_low_fill_mark)')) adr_transaction_read_version = ( - CDefinedIntSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) + CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) adr_jmpbuf_ptr = ( - CDefinedIntSymbolic('((long)&STM_SEGMENT->jmpbuf_ptr)')) -adr_write_slowpath = CDefinedIntSymbolic('((long)&_stm_write_slowpath)') + CFlexSymbolic('((long)&STM_SEGMENT->jmpbuf_ptr)')) +adr_write_slowpath = CFlexSymbolic('((long)&_stm_write_slowpath)') adr_jit_default_msg = ( - CDefinedIntSymbolic('((long)(char *)"return from JITted function")')) + CFlexSymbolic('((long)(char *)"return from JITted function")')) adr__stm_become_inevitable = ( - CDefinedIntSymbolic('((long)&_stm_become_inevitable)')) + CFlexSymbolic('((long)&_stm_become_inevitable)')) adr_stm_commit_transaction = ( - CDefinedIntSymbolic('((long)&stm_commit_transaction)')) + CFlexSymbolic('((long)&stm_commit_transaction)')) adr_pypy_stm_start_transaction = ( - CDefinedIntSymbolic('((long)&pypy_stm_start_transaction)')) + CFlexSymbolic('((long)&pypy_stm_start_transaction)')) def jit_stm_transaction_break_point(): From noreply at buildbot.pypy.org Mon Mar 24 13:06:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 13:06:54 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: test for 9ddbe2af8e8a Message-ID: <20140324120654.B37021C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70249:d3b5d93d4ffa Date: 2014-03-24 13:05 +0100 http://bitbucket.org/pypy/pypy/changeset/d3b5d93d4ffa/ Log: test for 9ddbe2af8e8a diff --git a/rpython/rlib/test/test_rstm.py b/rpython/rlib/test/test_rstm.py --- a/rpython/rlib/test/test_rstm.py +++ b/rpython/rlib/test/test_rstm.py @@ -1,10 +1,14 @@ import thread, time -from rpython.rlib.rstm import ThreadLocalReference +from rpython.rlib import rstm + +def test_symbolics(): + assert rstm.adr_nursery_free == rstm.adr_nursery_free + assert rstm.adr_nursery_free != rstm.adr_nursery_top def test_tlref_untranslated(): class FooBar(object): pass - t = ThreadLocalReference(FooBar) + t = rstm.ThreadLocalReference(FooBar) results = [] def subthread(): x = FooBar() From noreply at buildbot.pypy.org Mon Mar 24 15:05:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 15:05:49 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Remove these markers again, as they were in stmgc-c4 Message-ID: <20140324140549.CD28B1C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70250:623d92bb9ac3 
Date: 2014-03-24 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/623d92bb9ac3/ Log: Remove these markers again, as they were in stmgc-c4 diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -331,14 +331,12 @@ next.prev = prev @staticmethod - @rgc.no_collect def _release_gil_shadowstack(): before = rffi.aroundstate.before if before: before() @staticmethod - @rgc.no_collect def _reacquire_gil_shadowstack(): after = rffi.aroundstate.after if after: From noreply at buildbot.pypy.org Mon Mar 24 15:05:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 15:05:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add another TODO Message-ID: <20140324140551.355091C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70251:e134e6f294b9 Date: 2014-03-24 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/e134e6f294b9/ Log: Add another TODO diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -43,6 +43,10 @@ ------------------------------------------------------------ +remap the group of read markers corresponding to a nusery into +a single physical "garbage" page + +------------------------------------------------------------ From noreply at buildbot.pypy.org Mon Mar 24 15:05:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 15:05:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140324140552.638671C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70252:da914a210557 Date: 2014-03-24 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/da914a210557/ Log: merge heads diff --git a/rpython/rlib/test/test_rstm.py b/rpython/rlib/test/test_rstm.py --- a/rpython/rlib/test/test_rstm.py +++ b/rpython/rlib/test/test_rstm.py @@ -1,10 +1,14 @@ import thread, time -from rpython.rlib.rstm import ThreadLocalReference +from rpython.rlib import rstm + +def test_symbolics(): + assert rstm.adr_nursery_free == rstm.adr_nursery_free + assert rstm.adr_nursery_free != rstm.adr_nursery_top def test_tlref_untranslated(): class FooBar(object): pass - t = ThreadLocalReference(FooBar) + t = rstm.ThreadLocalReference(FooBar) results = [] def subthread(): x = FooBar() From noreply at buildbot.pypy.org Mon Mar 24 15:14:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 15:14:45 +0100 (CET) Subject: [pypy-commit] stmgc default: Restore the setting of PROT_NONE pages after a fork() Message-ID: <20140324141445.596D01C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1086:62382817df4c Date: 2014-03-24 15:14 +0100 http://bitbucket.org/pypy/stmgc/changeset/62382817df4c/ Log: Restore the setting of PROT_NONE pages after a fork() diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -12,6 +12,7 @@ static bool fork_was_in_transaction; static char *setup_mmap(char *reason); /* forward, in setup.c */ +static void setup_protection_settings(void); /* forward, in setup.c */ static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ @@ -226,6 +227,10 @@ } assert(stm_all_thread_locals == fork_this_tl); + /* Restore the base setting of PROT_NONE pages. + */ + setup_protection_settings(); + /* Make all pages shared again. 
*/ uintptr_t pagenum, endpagenum; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -14,6 +14,30 @@ return result; } +static void setup_protection_settings(void) +{ + /* The segment 0 is not used to run transactions, but contains the + shared copy of the pages. We mprotect all pages before so that + accesses fail, up to and including the pages corresponding to the + nurseries of the other segments. */ + mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); + + /* In each segment, the first page is where TLPREFIX'ed + NULL accesses land. We mprotect it so that accesses fail. */ + mprotect(segment_base, 4096, PROT_NONE); + + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) + mprotect(segment_base + 8192, + (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); + } +} + void stm_setup(void) { /* Check that some values are acceptable */ @@ -32,33 +56,18 @@ assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); - - /* The segment 0 is not used to run transactions, but contains the - shared copy of the pages. We mprotect all pages before so that - accesses fail, up to and including the pages corresponding to the - nurseries of the other segments. */ - mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + setup_protection_settings(); long i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); - /* In each segment, the first page is where TLPREFIX'ed - NULL accesses land. We mprotect it so that accesses fail. */ - mprotect(segment_base, 4096, PROT_NONE); - /* Fill the TLS page (page 1) with 0xDC, for debugging */ memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); - /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ - if (FIRST_READMARKER_PAGE > 2) - mprotect(segment_base + 8192, - (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); - /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ From noreply at buildbot.pypy.org Mon Mar 24 15:19:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 15:19:26 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: oups Message-ID: <20140324141926.C3F8F1C0225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70253:ccb9cec89b0d Date: 2014-03-24 15:18 +0100 http://bitbucket.org/pypy/pypy/changeset/ccb9cec89b0d/ Log: oups diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -7,6 +7,7 @@ /* 0 = not initialized; 1 = normal mode; 2 or more = atomic mode */ __thread long pypy_stm_ready_atomic; __thread uintptr_t pypy_stm_nursery_low_fill_mark; +__thread uintptr_t pypy_stm_nursery_low_fill_mark_saved; extern Signed pypy_stmcb_size_rounded_up(void*); extern void pypy_stmcb_trace(void*, void(*)(void*)); From noreply at buildbot.pypy.org Mon Mar 24 15:42:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 15:42:13 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Also implement stm_should_break_transaction not immediately 
Message-ID: <20140324144213.1D67C1C0161@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70254:15c9c0baed53 Date: 2014-03-24 15:41 +0100 http://bitbucket.org/pypy/pypy/changeset/15c9c0baed53/ Log: Also implement stm_should_break_transaction not immediately followed by a guard diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2479,17 +2479,23 @@ # so if it is followed with a JB, it will follow the jump if # we should break the transaction now. # + if not IS_X86_64: + todo() # "needed for X86_64_SCRATCH_REG" psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark self.mc.MOV(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) nf_adr = rstm.adr_nursery_free assert rx86.fits_in_32bits(nf_adr) # because it is in the 2nd page self.mc.CMP_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) + def genop_stm_should_break_transaction(self, op, arglocs, result_loc): + self._generate_cmp_break_transaction() + rl = result_loc.lowest8bits() + self.mc.SET_ir(rx86.Conditions['B'], rl.value) + self.mc.MOVZX8_rr(result_loc.value, rl.value) + def genop_guard_stm_should_break_transaction(self, op, guard_op, guard_token, arglocs, result_loc): - if not IS_X86_64: - todo() # "needed for X86_64_SCRATCH_REG" self._generate_cmp_break_transaction() if guard_op.getopnum() == rop.GUARD_FALSE: self.implement_guard(guard_token, 'B') # JB goes to "yes, break" diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1236,9 +1236,12 @@ def consider_stm_should_break_transaction(self, op, guard_op): - if guard_op is None: - self.not_implemented_op(op) - self.perform_with_guard(op, guard_op, [], None) + if guard_op is not None: + self.perform_with_guard(op, guard_op, [], None) + else: + resloc = self.rm.force_allocate_reg(op.result, + need_lower_byte=True) + self.perform(op, [], resloc) def consider_stm_transaction_break(self, op, guard_op): self.perform_with_guard(op, guard_op, [], None) From noreply at buildbot.pypy.org Mon Mar 24 16:11:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 16:11:15 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: add TODO Message-ID: <20140324151115.06FE01C1018@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70255:148e8d27851a Date: 2014-03-24 16:10 +0100 http://bitbucket.org/pypy/pypy/changeset/148e8d27851a/ Log: add TODO diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -48,6 +48,11 @@ ------------------------------------------------------------ +JIT: add an artificial malloc if the loop is so small as to contain +any! 
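The r70254 change above lets stm_should_break_transaction be compiled in two shapes: fused with a following guard (compare plus conditional jump) or standalone (compare plus SETB/MOVZX to materialize a 0/1 result). The control flow of that dispatch can be sketched with a toy emitter; this is not the real regalloc/assembler API, just the shape of the decision:

    class ToyEmitter(object):
        def __init__(self):
            self.lines = []
        def emit(self, text):
            self.lines.append(text)

    def gen_should_break_transaction(em, guard_op=None):
        # both forms start with the same comparison of the nursery
        # low-fill mark against nursery_free
        em.emit("CMP low_fill_mark, nursery_free")
        if guard_op is not None:
            # fused form: the guard consumes the CPU flags directly
            em.emit("JB %s" % guard_op)
        else:
            # standalone form: turn the 'below' flag into an integer result
            em.emit("SETB result_lo8")
            em.emit("MOVZX result, result_lo8")

    em = ToyEmitter()
    gen_should_break_transaction(em, guard_op="guard_target")  # guard follows
    gen_should_break_transaction(em)                           # no guard follows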
+ +------------------------------------------------------------ + From noreply at buildbot.pypy.org Mon Mar 24 16:18:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 16:18:09 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Bah, loading the address of the string "return from JITted function" Message-ID: <20140324151809.75C7B1C315C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70256:5a1c619c20f6 Date: 2014-03-24 16:17 +0100 http://bitbucket.org/pypy/pypy/changeset/5a1c619c20f6/ Log: Bah, loading the address of the string "return from JITted function" is better than getting an address out of the 8 bytes "return f" diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -817,7 +817,7 @@ jne_location = mc.get_relative_pos() # # if they are equal, we need to become inevitable now - mc.MOV_rj(edi.value, (self.SEGMENT_NO, rstm.adr_jit_default_msg)) + mc.MOV_ri(edi.value, rstm.adr_jit_default_msg) mc.CALL(imm(rstm.adr__stm_become_inevitable)) # there could have been a collection in _stm_become_inevitable; # reload the frame into ebp From noreply at buildbot.pypy.org Mon Mar 24 18:38:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 18:38:47 +0100 (CET) Subject: [pypy-commit] stmgc default: Yay, managed to isolate (one of) the misbehavior of PyPy Message-ID: <20140324173847.E7CE01D272F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1087:2e0533b4f814 Date: 2014-03-24 18:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/2e0533b4f814/ Log: Yay, managed to isolate (one of) the misbehavior of PyPy diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -314,3 +314,27 @@ assert stm_get_char(lp2, 10) == 'C' assert stm_get_weakref(lp1) == ffi.NULL assert stm_get_weakref(lp3) == lp2 + + def test_weakref_bug1(self): + self.start_transaction() + lp0 = stm_allocate(16) + self.push_root(lp0) + self.commit_transaction() + # + self.start_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + stm_write(lp0) # privatize page + # + self.switch(1) + self.start_transaction() + lp1 = stm_allocate_weakref(lp0) + assert stm_get_weakref(lp1) == lp0 + self.push_root(lp1) + # + self.switch(0) + stm_major_collect() + # + self.switch(1) + lp1 = self.pop_root() + assert stm_get_weakref(lp1) == lp0 From noreply at buildbot.pypy.org Mon Mar 24 18:42:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 18:42:19 +0100 (CET) Subject: [pypy-commit] stmgc default: fix 2e0533b4f814 Message-ID: <20140324174219.A235D1C0161@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1088:256d9b6aba2e Date: 2014-03-24 18:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/256d9b6aba2e/ Log: fix 2e0533b4f814 diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -125,7 +125,9 @@ } ssize_t size = 16; - object_t *pointing_to = *WEAKREF_PTR(weakref, size); + stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); + char *real_wr = REAL_ADDRESS(stm_object_pages, wr); + object_t *pointing_to = *(object_t **)real_wr; assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); From noreply at buildbot.pypy.org Mon Mar 24 18:46:51 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 24 Mar 2014 
18:46:51 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: start to generalize for different system code pages Message-ID: <20140324174651.B13EE1C0166@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70257:17fbb29b7174 Date: 2014-03-24 06:33 +0200 http://bitbucket.org/pypy/pypy/changeset/17fbb29b7174/ Log: start to generalize for different system code pages diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -1,3 +1,5 @@ +import sys + class AppTestCodecs: spaceconfig = { "usemodules": ['unicodedata', 'struct', 'binascii'], @@ -137,7 +139,9 @@ class AppTestPartialEvaluation: - spaceconfig = dict(usemodules=('array',)) + spaceconfig = dict(usemodules=['array',]) + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_winreg') def test_partial_utf8(self): import _codecs @@ -694,8 +698,18 @@ import sys if sys.platform != 'win32': return + toencode = u'caf\xe9', 'caf\xe9' + try: + #test for non-latin1 codepage, more general test needed + import _winreg + key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + r'System\CurrentControlSet\Control\Nls\CodePage') + if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': #non-latin1 + toencode = u'caf\xbf','caf\xbf' + except: + assert False, 'cannot test mbcs on this windows system, check code page' assert u'test'.encode('mbcs') == 'test' - assert u'caf\xe9'.encode('mbcs') == '?' + assert toencode[0].encode('mbcs') == toencode[1] assert u'\u040a'.encode('mbcs') == '?' # some cyrillic letter assert 'cafx\e9'.decode('mbcs') == u'cafx\e9' From noreply at buildbot.pypy.org Mon Mar 24 18:46:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 24 Mar 2014 18:46:52 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: skip on non-latin1 code pages Message-ID: <20140324174652.DFDCD1C0166@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70258:161293a348ce Date: 2014-03-24 09:42 +0200 http://bitbucket.org/pypy/pypy/changeset/161293a348ce/ Log: skip on non-latin1 code pages diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -231,9 +231,12 @@ unicwd = u'\xe7w\xf0' try: fsencoding = test_support.TESTFN_ENCODING or "ascii" - unicwd.encode(fsencoding) + asciival = unicwd.encode(fsencoding) + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): - # FS encoding is probably ASCII + # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass else: with test_support.temp_cwd(unicwd): From noreply at buildbot.pypy.org Mon Mar 24 18:46:54 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 24 Mar 2014 18:46:54 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: fix for llgraph untranslated testing Message-ID: <20140324174654.1BEB91C0166@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70259:32fe69874e10 Date: 2014-03-24 19:45 +0200 http://bitbucket.org/pypy/pypy/changeset/32fe69874e10/ Log: fix for llgraph untranslated testing diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -366,9 +366,15 @@ restype = get_ctypes_type(T.TO.RESULT) 
try: kwds = {'use_errno': True} + if getattr(T.TO, 'ABI', 'FFI_STDCALL'): + # for win32 system call + return ctypes.WINFUNCTYPE(restype, *argtypes, **kwds) return ctypes.CFUNCTYPE(restype, *argtypes, **kwds) except TypeError: # unexpected 'use_errno' argument, old ctypes version + if getattr(T.TO, 'ABI', 'FFI_STDCALL'): + # for win32 system call + return ctypes.WINFUNCTYPE(restype, *argtypes) return ctypes.CFUNCTYPE(restype, *argtypes) elif isinstance(T.TO, lltype.OpaqueType): return ctypes.c_void_p From noreply at buildbot.pypy.org Mon Mar 24 18:48:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 18:48:08 +0100 (CET) Subject: [pypy-commit] stmgc default: add asserts Message-ID: <20140324174808.D296F1C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1089:ee65d7dc215e Date: 2014-03-24 18:47 +0100 http://bitbucket.org/pypy/stmgc/changeset/ee65d7dc215e/ Log: add asserts diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -222,8 +222,13 @@ realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + OPT_ASSERT((nursery_used & 7) == 0); memset(realnursery, 0, nursery_used); + /* assert that the rest of the nursery still contains only zeroes */ + assert_memset_zero(realnursery + nursery_used, + (NURSERY_END - _stm_nursery_start) - nursery_used); + pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; /* free any object left from 'young_outside_nursery' */ @@ -374,6 +379,7 @@ void _stm_set_nursery_free_count(uint64_t free_count) { assert(free_count <= NURSERY_SIZE); + assert((free_count & 7) == 0); _stm_nursery_start = NURSERY_END - free_count; long i; From noreply at buildbot.pypy.org Mon Mar 24 18:48:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 18:48:35 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: add an item Message-ID: <20140324174835.6E7341C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70260:580b1e7c7142 Date: 2014-03-24 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/580b1e7c7142/ Log: add an item diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -53,6 +53,12 @@ ------------------------------------------------------------ +**URGENT** +become_inevitable: getarrayitem/raw, for the jit/metainterp/counters.py + +------------------------------------------------------------ + + From noreply at buildbot.pypy.org Mon Mar 24 18:49:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 18:49:03 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/ee65d7dc215e Message-ID: <20140324174903.BF9F81C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70261:5f7df771c0e1 Date: 2014-03-24 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/5f7df771c0e1/ Log: import stmgc/ee65d7dc215e diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -a33130d9f35c +ee65d7dc215e diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -13,6 +13,7 @@ static bool fork_was_in_transaction; static char *setup_mmap(char *reason); /* forward, in setup.c */ +static void setup_protection_settings(void); 
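The ll2ctypes change above routes Windows stdcall system calls through ctypes.WINFUNCTYPE, but as written the condition getattr(T.TO, 'ABI', 'FFI_STDCALL') looks like it is always true; the later win32-fixes4 follow-up on this page ("abi differentiation could be cleaner?") tightens it into an explicit comparison against FFI_STDCALL plus a sys.platform check. A standalone sketch of that tightened selection, where the ABI constants are stand-ins for the real rpython.rlib.clibffi values:

    import sys, ctypes

    FFI_DEFAULT_ABI = object()     # stand-in for the clibffi constant
    FFI_STDCALL = object()         # stand-in for the clibffi constant

    def functype_for(abi, restype, *argtypes):
        # compare the ABI explicitly instead of testing it for truth
        if sys.platform == 'win32' and abi == FFI_STDCALL:
            factory = ctypes.WINFUNCTYPE    # stdcall, for win32 system calls
        else:
            factory = ctypes.CFUNCTYPE      # plain C calling convention
        try:
            return factory(restype, *argtypes, use_errno=True)
        except TypeError:                   # old ctypes without use_errno
            return factory(restype, *argtypes)

    CMPFUNC = functype_for(FFI_DEFAULT_ABI, ctypes.c_int,
                           ctypes.c_void_p, ctypes.c_void_p)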
/* forward, in setup.c */ static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ @@ -227,6 +228,10 @@ } assert(stm_all_thread_locals == fork_this_tl); + /* Restore the base setting of PROT_NONE pages. + */ + setup_protection_settings(); + /* Make all pages shared again. */ uintptr_t pagenum, endpagenum; diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -223,8 +223,13 @@ realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + OPT_ASSERT((nursery_used & 7) == 0); memset(realnursery, 0, nursery_used); + /* assert that the rest of the nursery still contains only zeroes */ + assert_memset_zero(realnursery + nursery_used, + (NURSERY_END - _stm_nursery_start) - nursery_used); + pseg->pub.nursery_current = (stm_char *)_stm_nursery_start; /* free any object left from 'young_outside_nursery' */ @@ -375,6 +380,7 @@ void _stm_set_nursery_free_count(uint64_t free_count) { assert(free_count <= NURSERY_SIZE); + assert((free_count & 7) == 0); _stm_nursery_start = NURSERY_END - free_count; long i; diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -15,6 +15,30 @@ return result; } +static void setup_protection_settings(void) +{ + /* The segment 0 is not used to run transactions, but contains the + shared copy of the pages. We mprotect all pages before so that + accesses fail, up to and including the pages corresponding to the + nurseries of the other segments. */ + mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); + + /* In each segment, the first page is where TLPREFIX'ed + NULL accesses land. We mprotect it so that accesses fail. */ + mprotect(segment_base, 4096, PROT_NONE); + + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) + mprotect(segment_base + 8192, + (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); + } +} + void stm_setup(void) { /* Check that some values are acceptable */ @@ -33,33 +57,18 @@ assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); - - /* The segment 0 is not used to run transactions, but contains the - shared copy of the pages. We mprotect all pages before so that - accesses fail, up to and including the pages corresponding to the - nurseries of the other segments. */ - mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE); + setup_protection_settings(); long i; for (i = 1; i <= NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); - /* In each segment, the first page is where TLPREFIX'ed - NULL accesses land. We mprotect it so that accesses fail. 
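The nursery.c asserts imported above check two invariants when the nursery is reset: the used portion is a multiple of 8 bytes, and everything past it is still zero, so only the used prefix ever needs clearing. A toy model of the same check, using a bytearray in place of the real nursery memory:

    NURSERY_SIZE = 64
    nursery = bytearray(NURSERY_SIZE)      # starts out all zero
    nursery_used = 24                      # bytes handed out since the reset

    assert nursery_used % 8 == 0           # allocations stay 8-byte aligned
    nursery[:nursery_used] = b"\x00" * nursery_used   # memset() of the prefix
    # the rest must never have been touched, so it is still zero
    assert all(b == 0 for b in nursery[nursery_used:])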
*/ - mprotect(segment_base, 4096, PROT_NONE); - /* Fill the TLS page (page 1) with 0xDC, for debugging */ memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096); /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); - /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ - if (FIRST_READMARKER_PAGE > 2) - mprotect(segment_base + 8192, - (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); - /* Initialize STM_PSEGMENT */ struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(1 <= i && i < 255); /* 255 is WL_VISITED in gcpage.c */ diff --git a/rpython/translator/stm/src_stm/stm/weakref.c b/rpython/translator/stm/src_stm/stm/weakref.c --- a/rpython/translator/stm/src_stm/stm/weakref.c +++ b/rpython/translator/stm/src_stm/stm/weakref.c @@ -126,7 +126,9 @@ } ssize_t size = 16; - object_t *pointing_to = *WEAKREF_PTR(weakref, size); + stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); + char *real_wr = REAL_ADDRESS(stm_object_pages, wr); + object_t *pointing_to = *(object_t **)real_wr; assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { //assert(flag_page_private[(uintptr_t)weakref / 4096UL] != PRIVATE_PAGE); From noreply at buildbot.pypy.org Mon Mar 24 19:55:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 19:55:30 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Add a bug (I think) Message-ID: <20140324185530.5BC471D297A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70262:920ba881c387 Date: 2014-03-24 19:54 +0100 http://bitbucket.org/pypy/pypy/changeset/920ba881c387/ Log: Add a bug (I think) diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -58,6 +58,14 @@ ------------------------------------------------------------ +**BUG** assembler.py: _call_footer(): we need to set the gcmap to +something around the call to _stm_become_inevitable()! Or else it +doesn't work and we need to call a different version of +_stm_become_inevitable() that will never attempt to pause or mark a +safe-point + +------------------------------------------------------------ + From noreply at buildbot.pypy.org Mon Mar 24 20:22:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 20:22:39 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Backed out changeset 920ba881c387 Message-ID: <20140324192239.901701D2A0D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70263:24e825aa4bbd Date: 2014-03-24 20:21 +0100 http://bitbucket.org/pypy/pypy/changeset/24e825aa4bbd/ Log: Backed out changeset 920ba881c387 Wrong, it's not a bug diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -58,14 +58,6 @@ ------------------------------------------------------------ -**BUG** assembler.py: _call_footer(): we need to set the gcmap to -something around the call to _stm_become_inevitable()! 
Or else it -doesn't work and we need to call a different version of -_stm_become_inevitable() that will never attempt to pause or mark a -safe-point - ------------------------------------------------------------- - From noreply at buildbot.pypy.org Mon Mar 24 20:24:55 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 24 Mar 2014 20:24:55 +0100 (CET) Subject: [pypy-commit] benchmarks single-run: add multithreaded richards Message-ID: <20140324192455.1BFE91D2A1E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r237:7180631ee8db Date: 2014-03-24 21:24 +0200 http://bitbucket.org/pypy/benchmarks/changeset/7180631ee8db/ Log: add multithreaded richards diff --git a/lib/pypy/include/pypy_decl.h b/lib/pypy/include/pypy_decl.h --- a/lib/pypy/include/pypy_decl.h +++ b/lib/pypy/include/pypy_decl.h @@ -513,17 +513,17 @@ PyAPI_DATA(PyTypeObject) PySlice_Type; PyAPI_DATA(PyObject*) PyExc_IOError; PyAPI_DATA(PyObject*) PyExc_RuntimeError; -PyAPI_DATA(PyObject*) PyExc_SystemError; +PyAPI_DATA(PyObject*) PyExc_AttributeError; PyAPI_DATA(PyObject*) PyExc_NameError; PyAPI_DATA(PyObject*) PyExc_MemoryError; PyAPI_DATA(PyObject*) PyExc_SystemExit; PyAPI_DATA(PyTypeObject) PyModule_Type; PyAPI_DATA(PyTypeObject) PyBaseObject_Type; -PyAPI_DATA(PyObject*) PyExc_UnicodeTranslateError; -PyAPI_DATA(PyObject*) PyExc_UnicodeWarning; +PyAPI_DATA(PyObject*) PyExc_FloatingPointError; +PyAPI_DATA(PyObject*) PyExc_UnicodeDecodeError; PyAPI_DATA(PyObject*) PyExc_Exception; PyAPI_DATA(PyObject*) PyExc_TypeError; -PyAPI_DATA(PyObject*) PyExc_AttributeError; +PyAPI_DATA(PyObject*) PyExc_SystemError; PyAPI_DATA(PyObject*) PyExc_ReferenceError; PyAPI_DATA(PyTypeObject) PyNotImplemented_Type; PyAPI_DATA(PyTypeObject) PySet_Type; @@ -555,14 +555,14 @@ PyAPI_DATA(PyObject*) PyExc_BytesWarning; PyAPI_DATA(PyObject*) PyExc_DeprecationWarning; PyAPI_DATA(PyObject*) PyExc_SyntaxError; -PyAPI_DATA(PyObject*) PyExc_UnicodeDecodeError; +PyAPI_DATA(PyObject*) PyExc_UnicodeWarning; PyAPI_DATA(PyObject*) PyExc_ZeroDivisionError; PyAPI_DATA(PyTypeObject) PyFloat_Type; -PyAPI_DATA(PyTypeObject) PyBaseString_Type; +PyAPI_DATA(PyObject*) PyExc_RuntimeWarning; PyAPI_DATA(PyObject) _Py_NoneStruct; +PyAPI_DATA(PyObject*) PyExc_IndentationError; +PyAPI_DATA(PyObject*) PyExc_AssertionError; PyAPI_DATA(PyObject*) PyExc_GeneratorExit; -PyAPI_DATA(PyObject*) PyExc_AssertionError; -PyAPI_DATA(PyObject*) PyExc_RuntimeWarning; PyAPI_DATA(PyObject*) PyExc_ImportWarning; PyAPI_DATA(PyObject*) PyExc_UnicodeEncodeError; PyAPI_DATA(PyTypeObject) PyInt_Type; @@ -571,8 +571,8 @@ PyAPI_DATA(PyObject*) PyExc_OSError; PyAPI_DATA(PyObject*) PyExc_KeyError; PyAPI_DATA(PyObject*) PyExc_SyntaxWarning; +PyAPI_DATA(PyTypeObject) PyBaseString_Type; PyAPI_DATA(PyObject*) PyExc_StopIteration; -PyAPI_DATA(PyObject*) PyExc_IndentationError; PyAPI_DATA(PyObject*) PyExc_NotImplementedError; PyAPI_DATA(PyObject*) PyExc_ImportError; PyAPI_DATA(PyDateTime_CAPI*) PyDateTimeAPI; @@ -582,7 +582,7 @@ PyAPI_DATA(PyTypeObject) PyClass_Type; PyAPI_DATA(PyTypeObject) PyType_Type; PyAPI_DATA(PyTypeObject) PyMemoryView_Type; -PyAPI_DATA(PyObject*) PyExc_FloatingPointError; +PyAPI_DATA(PyObject*) PyExc_UnicodeTranslateError; PyAPI_DATA(PyObject*) PyExc_LookupError; PyAPI_DATA(PyObject*) PyExc_EOFError; PyAPI_DATA(PyObject*) PyExc_BufferError; diff --git a/lib/pypy/rpython/translator/c/src/signals.o b/lib/pypy/rpython/translator/c/src/signals.o index 166016f96874014a34535131bd7cfed3aabe09eb..08b3649ae1483b939bb048d11ecb051f62ffe15d GIT binary patch 
[cut] diff --git a/lib/pypy/rpython/translator/c/src/stacklet/stacklet.o b/lib/pypy/rpython/translator/c/src/stacklet/stacklet.o index e42497399f1f975b42e62fbf236498014c21f654..6dba4dd0d4a6480683502d1568c529dbaecf9174 GIT binary patch [cut] diff --git a/lib/pypy/rpython/translator/c/src/thread.o b/lib/pypy/rpython/translator/c/src/thread.o index 9b7dcc7cb6e9c6eb94216acf121fbcd5b32297e5..06072cecd32b5f5afce76655ad919f8ff637714c GIT binary patch [cut] diff --git a/multithread/multithread-richards.py b/multithread/multithread-richards.py new file mode 100755 --- /dev/null +++ b/multithread/multithread-richards.py @@ -0,0 +1,458 @@ +# based on a Java version: +# Based on original version written in BCPL by Dr Martin Richards +# in 1981 at Cambridge University Computer Laboratory, England +# and a C++ version derived from a Smalltalk version written by +# L Peter Deutsch. +# Java version: Copyright (C) 1995 Sun Microsystems, Inc. +# Translation from C++, Mario Wolczko +# Outer loop added by Alex Jacoby + +import thread, os +#from __pypy__.thread import atomic + + +# Task IDs +I_IDLE = 1 +I_WORK = 2 +I_HANDLERA = 3 +I_HANDLERB = 4 +I_DEVA = 5 +I_DEVB = 6 + +# Packet types +K_DEV = 1000 +K_WORK = 1001 + +# Packet + +BUFSIZE = 4 + +BUFSIZE_RANGE = range(BUFSIZE) + +class Packet(object): + def __init__(self,l,i,k): + self.link = l + self.ident = i + self.kind = k + self.datum = 0 + self.data = [0] * BUFSIZE + + def append_to(self,lst): + self.link = None + if lst is None: + return self + else: + p = lst + next = p.link + while next is not None: + p = next + next = p.link + p.link = self + return lst + +# Task Records + +class TaskRec(object): + pass + +class DeviceTaskRec(TaskRec): + def __init__(self): + self.pending = None + +class IdleTaskRec(TaskRec): + def __init__(self): + self.control = 1 + self.count = 10000 + +class HandlerTaskRec(TaskRec): + def __init__(self): + self.work_in = None + self.device_in = None + + def workInAdd(self,p): + self.work_in = p.append_to(self.work_in) + return self.work_in + + def deviceInAdd(self,p): + self.device_in = p.append_to(self.device_in) + return self.device_in + +class WorkerTaskRec(TaskRec): + def __init__(self): + self.destination = I_HANDLERA + self.count = 0 +# Task + +class TaskState(object): + def __init__(self): + self.packet_pending = True + self.task_waiting = False + self.task_holding = False + + def packetPending(self): + self.packet_pending = True + self.task_waiting = False + self.task_holding = False + return self + + def waiting(self): + self.packet_pending = False + self.task_waiting = True + self.task_holding = False + return self + + def running(self): + self.packet_pending = False + self.task_waiting = False + self.task_holding = False + return self + + def waitingWithPacket(self): + self.packet_pending = True + self.task_waiting = True + self.task_holding = False + return self + + def isPacketPending(self): + return self.packet_pending + + def isTaskWaiting(self): + return self.task_waiting + + def isTaskHolding(self): + return self.task_holding + + def isTaskHoldingOrWaiting(self): + return self.task_holding or (not self.packet_pending and self.task_waiting) + + def isWaitingWithPacket(self): + return self.packet_pending and self.task_waiting and not self.task_holding + + + + + +tracing = False +layout = 0 + +def trace(a): + global layout + layout -= 1 + if layout <= 0: + print + layout = 50 + print a, + + +TASKTABSIZE = 10 + +class TaskWorkArea(object): + def __init__(self): + self.taskTab = [None] * TASKTABSIZE + + self.taskList = 
None + + self.holdCount = 0 + self.qpktCount = 0 + +class Task(TaskState): + + + def __init__(self,i,p,w,initialState,r, taskWorkArea): + self.taskWorkArea = taskWorkArea + self.link = taskWorkArea.taskList + self.ident = i + self.priority = p + self.input = w + + self.packet_pending = initialState.isPacketPending() + self.task_waiting = initialState.isTaskWaiting() + self.task_holding = initialState.isTaskHolding() + + self.handle = r + + taskWorkArea.taskList = self + taskWorkArea.taskTab[i] = self + + def fn(self,pkt,r): + raise NotImplementedError + + + def addPacket(self,p,old): + if self.input is None: + self.input = p + self.packet_pending = True + if self.priority > old.priority: + return self + else: + p.append_to(self.input) + return old + + + def runTask(self): + if self.isWaitingWithPacket(): + msg = self.input + self.input = msg.link + if self.input is None: + self.running() + else: + self.packetPending() + else: + msg = None + + return self.fn(msg,self.handle) + + + def waitTask(self): + self.task_waiting = True + return self + + + def hold(self): + self.taskWorkArea.holdCount += 1 + self.task_holding = True + return self.link + + + def release(self,i): + t = self.findtcb(i) + t.task_holding = False + if t.priority > self.priority: + return t + else: + return self + + + def qpkt(self,pkt): + t = self.findtcb(pkt.ident) + self.taskWorkArea.qpktCount += 1 + pkt.link = None + pkt.ident = self.ident + return t.addPacket(pkt,self) + + + def findtcb(self,id): + t = self.taskWorkArea.taskTab[id] + if t is None: + raise Exception("Bad task id %d" % id) + return t + + +# DeviceTask + + +class DeviceTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,p,w,s,r, taskWorkArea) + + def fn(self,pkt,r): + d = r + assert isinstance(d, DeviceTaskRec) + if pkt is None: + pkt = d.pending + if pkt is None: + return self.waitTask() + else: + d.pending = None + return self.qpkt(pkt) + else: + d.pending = pkt + if tracing: trace(pkt.datum) + return self.hold() + + + +class HandlerTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,p,w,s,r, taskWorkArea) + + def fn(self,pkt,r): + h = r + assert isinstance(h, HandlerTaskRec) + if pkt is not None: + if pkt.kind == K_WORK: + h.workInAdd(pkt) + else: + h.deviceInAdd(pkt) + work = h.work_in + if work is None: + return self.waitTask() + count = work.datum + if count >= BUFSIZE: + h.work_in = work.link + return self.qpkt(work) + + dev = h.device_in + if dev is None: + return self.waitTask() + + h.device_in = dev.link + dev.datum = work.data[count] + work.datum = count + 1 + return self.qpkt(dev) + +# IdleTask + + +class IdleTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,0,None,s,r, taskWorkArea) + + def fn(self,pkt,r): + i = r + assert isinstance(i, IdleTaskRec) + i.count -= 1 + if i.count == 0: + return self.hold() + elif i.control & 1 == 0: + i.control /= 2 + return self.release(I_DEVA) + else: + i.control = i.control/2 ^ 0xd008 + return self.release(I_DEVB) + + +# WorkTask + + +A = ord('A') + +class WorkTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,p,w,s,r, taskWorkArea) + + def fn(self,pkt,r): + w = r + assert isinstance(w, WorkerTaskRec) + if pkt is None: + return self.waitTask() + + if w.destination == I_HANDLERA: + dest = I_HANDLERB + else: + dest = I_HANDLERA + + w.destination = dest + pkt.ident = dest + pkt.datum = 0 + + for i in BUFSIZE_RANGE: # xrange(BUFSIZE) + w.count += 1 + if w.count > 26: + w.count = 1 + pkt.data[i] = 
A + w.count - 1 + + return self.qpkt(pkt) + +try: + from time import time +except ImportError: + def time(): + return 0 + + +def schedule(taskWorkArea): + t = taskWorkArea.taskList + while t is not None: + pkt = None + + if tracing: + print "tcb =",t.ident + + if t.isTaskHoldingOrWaiting(): + t = t.link + else: + if tracing: trace(chr(ord("0")+t.ident)) + t = t.runTask() + +class Richards(object): + + def __init__(self): + self.finished_lock = thread.allocate_lock() + self.finished_lock.acquire() + self.taskWorkArea = TaskWorkArea() + + def run_and_unlock(self, to_do): + os.write(1, 'running...\n') + iterations = 0 + self.result = True + while 1: + try: + to_do.pop() + except IndexError: + break + iterations += 1 + self.result = self.run(self.taskWorkArea) + os.write(1, 'done, iterations=%d, result=%r\n' % (iterations, self.result)) + self.finished_lock.release() + + def run(self, taskWorkArea): + #with atomic: + if 1: + taskWorkArea.holdCount = 0 + taskWorkArea.qpktCount = 0 + + IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec(), + taskWorkArea) + + wkq = Packet(None, 0, K_WORK) + wkq = Packet(wkq , 0, K_WORK) + WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec(), + taskWorkArea) + + wkq = Packet(None, I_DEVA, K_DEV) + wkq = Packet(wkq , I_DEVA, K_DEV) + wkq = Packet(wkq , I_DEVA, K_DEV) + HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec(), + taskWorkArea) + + wkq = Packet(None, I_DEVB, K_DEV) + wkq = Packet(wkq , I_DEVB, K_DEV) + wkq = Packet(wkq , I_DEVB, K_DEV) + HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec(), + taskWorkArea) + + wkq = None; + DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec(), + taskWorkArea) + DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec(), + taskWorkArea) + + schedule(taskWorkArea) + + if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: + pass + else: + return False + + return True + +def entry_point(iterations, NUM_THREADS): + rlist = [Richards() for i in range(NUM_THREADS)] + to_do = [None] * iterations + startTime = time() + for r in rlist: + thread.start_new_thread(r.run_and_unlock, (to_do,)) + for r in rlist: + r.finished_lock.acquire() + endTime = time() + assert to_do == [] + result = all(r.result for r in rlist) + return result, startTime, endTime + +def main(entry_point = entry_point, iterations = 10, threads = 4): + print "Richards benchmark (Python) starting... [%r]" % entry_point + result, startTime, endTime = entry_point(iterations, threads) + if not result: + print "Incorrect results!" + return -1 + print "finished." 
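The entry_point above coordinates its worker threads with nothing more than a shared to-do list and one pre-acquired lock per thread: each worker releases its lock once the list runs dry, and the main thread "joins" by re-acquiring every lock. Stripped of the benchmark itself, that pattern looks like the following sketch (not the benchmark code):

    try:
        import thread                  # Python 2, as used by the benchmark
    except ImportError:
        import _thread as thread       # Python 3 spelling

    def worker(to_do, finished_lock):
        while True:
            try:
                to_do.pop()            # the shared list is the work counter
            except IndexError:
                break                  # no work left
        finished_lock.release()        # signal completion to the main thread

    def drive(iterations=10, num_threads=4):
        to_do = [None] * iterations
        locks = []
        for _ in range(num_threads):
            lock = thread.allocate_lock()
            lock.acquire()             # held until the worker releases it
            locks.append(lock)
            thread.start_new_thread(worker, (to_do, lock))
        for lock in locks:
            lock.acquire()             # blocks until that worker is done
        assert to_do == []

    drive()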
+ total_s = endTime - startTime + print "Total time for %d iterations: %.2f secs" %(iterations,total_s) + print "Average time per iteration: %.2f ms" %(total_s*1000/iterations) + return 42 + +if __name__ == '__main__': + import sys + main(iterations = int(sys.argv[1]), + threads = int(sys.argv[2])) diff --git a/unladen_swallow/performance/richards.py b/unladen_swallow/performance/richards.py --- a/unladen_swallow/performance/richards.py +++ b/unladen_swallow/performance/richards.py @@ -305,7 +305,7 @@ i.control /= 2 return self.release(I_DEVA) else: - i.control = i.control/2 ^ 0xd008 + i.control = (i.control/2) ^ 0xd008 return self.release(I_DEVB) From noreply at buildbot.pypy.org Mon Mar 24 20:56:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 20:56:50 +0100 (CET) Subject: [pypy-commit] stmgc default: An extra assert: when we abort, we don't try to *increase* the Message-ID: <20140324195650.D9B2B1C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1090:510720112e4c Date: 2014-03-24 20:56 +0100 http://bitbucket.org/pypy/stmgc/changeset/510720112e4c/ Log: An extra assert: when we abort, we don't try to *increase* the number of entries in the shadowstack diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -589,6 +589,7 @@ /* reset the tl->shadowstack and thread_local_obj to their original value before the transaction start */ stm_thread_local_t *tl = pseg->pub.running_thread; + assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; From noreply at buildbot.pypy.org Mon Mar 24 21:22:38 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 24 Mar 2014 21:22:38 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: abi differentiation could be cleaner? Message-ID: <20140324202238.5D1221D25C4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70264:13cf0ac5274e Date: 2014-03-24 22:21 +0200 http://bitbucket.org/pypy/pypy/changeset/13cf0ac5274e/ Log: abi differentiation could be cleaner? 
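The one-line unladen_swallow richards.py hunk above only adds parentheses: in Python, division binds more tightly than bitwise ^, so i.control/2 ^ 0xd008 and (i.control/2) ^ 0xd008 compute the same value and the parentheses simply make the intent explicit. A quick check (written with // so it also runs under Python 3):

    control = 0x1234
    assert control // 2 ^ 0xd008 == (control // 2) ^ 0xd008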
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -66,9 +66,11 @@ self.args = args class CallDescr(AbstractDescr): - def __init__(self, RESULT, ARGS, extrainfo): + from rpython.rlib.clibffi import FFI_DEFAULT_ABI + def __init__(self, RESULT, ARGS, extrainfo, ABI=FFI_DEFAULT_ABI): self.RESULT = RESULT self.ARGS = ARGS + self.ABI = ABI self.extrainfo = extrainfo def __repr__(self): @@ -428,7 +430,7 @@ try: return self.descrs[key] except KeyError: - descr = CallDescr(RESULT, ARGS, extrainfo) + descr = CallDescr(RESULT, ARGS, extrainfo, ABI=cif_description.abi) self.descrs[key] = descr return descr @@ -949,7 +951,7 @@ # graph, not to directly execute the python function result = self.cpu.maybe_on_top_of_llinterp(func, call_args, descr.RESULT) else: - FUNC = lltype.FuncType(descr.ARGS, descr.RESULT) + FUNC = lltype.FuncType(descr.ARGS, descr.RESULT, descr.ABI) func_to_call = rffi.cast(lltype.Ptr(FUNC), func) result = func_to_call(*call_args) del self.force_guard_op diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -358,6 +358,13 @@ if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): + + functype = ctypes.CFUNCTYPE + if sys.platform == 'win32': + from rpython.rlib.clibffi import FFI_STDCALL, FFI_DEFAULT_ABI + if getattr(T.TO, 'ABI', FFI_DEFAULT_ABI) == FFI_STDCALL: + # for win32 system call + functype = ctypes.WINFUNCTYPE argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS if ARG is not lltype.Void] if T.TO.RESULT is lltype.Void: @@ -366,16 +373,10 @@ restype = get_ctypes_type(T.TO.RESULT) try: kwds = {'use_errno': True} - if getattr(T.TO, 'ABI', 'FFI_STDCALL'): - # for win32 system call - return ctypes.WINFUNCTYPE(restype, *argtypes, **kwds) - return ctypes.CFUNCTYPE(restype, *argtypes, **kwds) + return functype(restype, *argtypes, **kwds) except TypeError: # unexpected 'use_errno' argument, old ctypes version - if getattr(T.TO, 'ABI', 'FFI_STDCALL'): - # for win32 system call - return ctypes.WINFUNCTYPE(restype, *argtypes) - return ctypes.CFUNCTYPE(restype, *argtypes) + return functype(restype, *argtypes) elif isinstance(T.TO, lltype.OpaqueType): return ctypes.c_void_p else: diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -537,7 +537,7 @@ class FuncType(ContainerType): _gckind = 'raw' __name__ = 'func' - def __init__(self, args, result): + def __init__(self, args, result, abi='FFI_DEFAULT_ABI'): for arg in args: assert isinstance(arg, LowLevelType) # There are external C functions eating raw structures, not @@ -547,6 +547,7 @@ if isinstance(result, ContainerType): raise TypeError, "function result can only be primitive or pointer" self.RESULT = result + self.ABI = abi def __str__(self): args = ', '.join(map(str, self.ARGS)) From noreply at buildbot.pypy.org Mon Mar 24 21:58:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 21:58:36 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/510720112e4c Message-ID: <20140324205836.3A1BF1C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70265:529c84afe33c Date: 2014-03-24 21:46 +0100 http://bitbucket.org/pypy/pypy/changeset/529c84afe33c/ Log: import 
stmgc/510720112e4c diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -ee65d7dc215e +510720112e4c diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -590,6 +590,7 @@ /* reset the tl->shadowstack and thread_local_obj to their original value before the transaction start */ stm_thread_local_t *tl = pseg->pub.running_thread; + assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; From noreply at buildbot.pypy.org Mon Mar 24 21:58:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 21:58:37 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140324205837.9F9BC1C0166@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70266:31386d1544cb Date: 2014-03-24 21:47 +0100 http://bitbucket.org/pypy/pypy/changeset/31386d1544cb/ Log: Fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -157,8 +157,12 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._load_shadowstack_top_in_ebx(mc, gcrootmap) + mc.MOV(ebx, self.heap_shadowstack_top()) mc.MOV_mr((self.SEGMENT_NO, ebx.value, -WORD), eax.value) + # STM note: this stores the updated jitframe object in the + # position -WORD, but (in this case) leaves the position + # -2*WORD untouched. This old jitframe object remains in + # the shadowstack just in case we do an abort later. mc.MOV_bi((self.SEGMENT_FRAME, gcmap_ofs), 0) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats) @@ -771,7 +775,7 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._call_header_shadowstack(gcrootmap) + self._call_header_shadowstack() def _call_header_with_stack_check(self): self._call_header() @@ -801,13 +805,9 @@ # we called a pypy_stm_start_transaction() earlier. assert IS_X86_64 # - # load the shadowstack pointer into ebx, and decrement it, - # but don't decrement the official shadowstack yet! We just - # keep it in ebx for a while (a callee-saved register). 
+ # load the shadowstack pointer into ebx (a callee-saved register) mc = self.mc - rst = self.heap_tl(gcrootmap.get_root_stack_top_addr()) - mc.MOV(ebx, rst) - mc.SUB_ri(ebx.value, WORD) + mc.MOV(ebx, self.heap_shadowstack_top()) # load the address of the jmpbuf mc.LEA_rs(edi.value, STM_JMPBUF_OFS) # compare it with the currently-stored jmpbuf @@ -821,18 +821,20 @@ mc.CALL(imm(rstm.adr__stm_become_inevitable)) # there could have been a collection in _stm_become_inevitable; # reload the frame into ebp - mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx.value, 0)) + mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx.value, -WORD)) # # this is where the JNE above jumps offset = mc.get_relative_pos() - jne_location assert 0 < offset <= 127 mc.overwrite(jne_location-1, chr(offset)) # - # now store ebx back, which will really decrement the shadowstack - mc.MOV(rst, ebx) + # now decrement ebx by 2*WORD and store it back, which will + # really decrement the shadowstack + mc.SUB_ri(ebx.value, 2 * WORD) + mc.MOV(self.heap_shadowstack_top(), ebx) elif gcrootmap and gcrootmap.is_shadow_stack: - self._call_footer_shadowstack(gcrootmap) + self._call_footer_shadowstack() # the return value is the jitframe self.mc.MOV_rr(eax.value, ebp.value) @@ -845,26 +847,37 @@ self.mc.ADD_ri(esp.value, self._get_whole_frame_size() * WORD) self.mc.RET() - def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): - """Loads the shadowstack top in ebx, and returns an integer - that gives the address of the stack top. - """ - mc.MOV(ebx, self.heap_tl(gcrootmap.get_root_stack_top_addr())) + def heap_shadowstack_top(self): + """Return an AddressLoc for '&shadowstack', the shadow stack top.""" + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + return self.heap_tl(gcrootmap.get_root_stack_top_addr()) - def _call_header_shadowstack(self, gcrootmap): + def _call_header_shadowstack(self): # put the frame in ebp on the shadowstack for the GC to find # (ebp is a writeable object and does not need a write-barrier # again (ensured by the code calling the loop)) - self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) + self.mc.MOV(ebx, self.heap_shadowstack_top()) self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) # MOV [ebx], ebp - self.mc.ADD_ri(ebx.value, WORD) - self.mc.MOV(self.heap_tl(gcrootmap.get_root_stack_top_addr()), ebx) - # MOV [rootstacktop], ebx + if self.cpu.gc_ll_descr.stm: + # With stm, we push the jitframe twice on the shadowstack. + # If we break transaction inside this frame, we'll do it + # with only one item on the shadowstack, and then we'll + # duplicate it again. The point is to store both the + # old and the new copy when case we do a realloc_frame, + # just for the case where we later abort. 
+ self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, WORD), ebp.value) + self.mc.ADD_ri(ebx.value, 2 * WORD) + else: + self.mc.ADD_ri(ebx.value, WORD) + self.mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx - def _call_footer_shadowstack(self, gcrootmap): - self.mc.SUB(self.heap_tl(gcrootmap.get_root_stack_top_addr()), WORD) - # SUB [rootstacktop], WORD + def _call_footer_shadowstack(self): + # SUB [rootstacktop], WORD (or 2 * WORD with STM) + if self.cpu.gc_ll_descr.stm: + self.mc.SUB(self.heap_shadowstack_top(), 2 * WORD) + else: + self.mc.SUB(self.heap_shadowstack_top(), WORD) def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking @@ -1133,9 +1146,12 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack: - mc.MOV(ecx, self.heap_tl(gcrootmap.get_root_stack_top_addr())) + mc.MOV(ecx, self.heap_shadowstack_top()) mc.MOV(ebp, mem(self.SEGMENT_NO, ecx, -WORD)) - # + self._reload_frame_wb(mc, align_stack) + + def _reload_frame_wb(self, mc, align_stack=False): + gcrootmap = self.cpu.gc_ll_descr.gcrootmap wbdescr = self.cpu.gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not @@ -2535,6 +2551,12 @@ # call stm_commit_transaction() mc.CALL(imm(rstm.adr_stm_commit_transaction)) # + # copy shadowstack[-1] into shadowstack[-2]: the latter is + # not going to be used any more, now that we committed + mc.MOV(ebx, self.heap_shadowstack_top()) + mc.MOV_rm(eax.value, (self.SEGMENT_NO, ebx, -WORD)) + mc.MOV_mr((self.SEGMENT_NO, ebx, -2 * WORD), eax.value) + # # update the two words in the STM_RESUME_BUF, as described # in arch.py. The "learip" pseudo-instruction turns into # what is, in gnu as syntax: lea 0(%rip), %rax (the 0 is @@ -2561,8 +2583,13 @@ mc.LEA_rs(esi.value, STM_JMPBUF_OFS_RBP) mc.CALL(imm(rstm.adr_pypy_stm_start_transaction)) # - # reload ebp with the frame now - self._reload_frame_if_necessary(self.mc) + # reload ebp with the frame now, picking the value from + # shadowstack[-2] and duplicating it into shadowstack[-1]. + # Only realloc_frame can make these values different again. + mc.MOV(ebx, self.heap_shadowstack_top()) + mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx, -2 * WORD)) + mc.MOV_mr((self.SEGMENT_NO, ebx, -WORD), ebp.value) + self._reload_frame_wb(self.mc) # # restore regs self._push_pop_regs_to_frame(False, mc, grp_regs, xmm_regs) From noreply at buildbot.pypy.org Mon Mar 24 22:50:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Mar 2014 22:50:24 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix translation Message-ID: <20140324215024.0DE951C0161@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70267:6c27839adebb Date: 2014-03-24 22:49 +0100 http://bitbucket.org/pypy/pypy/changeset/6c27839adebb/ Log: fix translation diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2554,8 +2554,8 @@ # copy shadowstack[-1] into shadowstack[-2]: the latter is # not going to be used any more, now that we committed mc.MOV(ebx, self.heap_shadowstack_top()) - mc.MOV_rm(eax.value, (self.SEGMENT_NO, ebx, -WORD)) - mc.MOV_mr((self.SEGMENT_NO, ebx, -2 * WORD), eax.value) + mc.MOV_rm(eax.value, (self.SEGMENT_NO, ebx.value, -WORD)) + mc.MOV_mr((self.SEGMENT_NO, ebx.value, -2 * WORD), eax.value) # # update the two words in the STM_RESUME_BUF, as described # in arch.py. 
The "learip" pseudo-instruction turns into @@ -2587,8 +2587,8 @@ # shadowstack[-2] and duplicating it into shadowstack[-1]. # Only realloc_frame can make these values different again. mc.MOV(ebx, self.heap_shadowstack_top()) - mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx, -2 * WORD)) - mc.MOV_mr((self.SEGMENT_NO, ebx, -WORD), ebp.value) + mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx.value, -2 * WORD)) + mc.MOV_mr((self.SEGMENT_NO, ebx.value, -WORD), ebp.value) self._reload_frame_wb(self.mc) # # restore regs From noreply at buildbot.pypy.org Mon Mar 24 22:52:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 24 Mar 2014 22:52:35 +0100 (CET) Subject: [pypy-commit] pypy default: document merged branches (Marc Abramowitz) Message-ID: <20140324215235.424E41C0161@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70268:72d4b0043081 Date: 2014-03-24 23:45 +0200 http://bitbucket.org/pypy/pypy/changeset/72d4b0043081/ Log: document merged branches (Marc Abramowitz) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -117,3 +117,10 @@ .. branch: improve-consecutive-dict-lookups Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext From noreply at buildbot.pypy.org Mon Mar 24 23:23:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 24 Mar 2014 23:23:26 +0100 (CET) Subject: [pypy-commit] pypy default: skip sandbox on windows Message-ID: <20140324222326.4545F1C0166@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70269:99d559ea7f5f Date: 2014-03-25 00:22 +0200 http://bitbucket.org/pypy/pypy/changeset/99d559ea7f5f/ Log: skip sandbox on windows diff --git a/pypy/sandbox/test/test_pypy_interact.py b/pypy/sandbox/test/test_pypy_interact.py --- a/pypy/sandbox/test/test_pypy_interact.py +++ b/pypy/sandbox/test/test_pypy_interact.py @@ -1,4 +1,4 @@ -import os, sys, stat, errno +import os, stat, errno, py from pypy.sandbox.pypy_interact import PyPySandboxedProc from rpython.translator.interactive import Translation @@ -9,6 +9,9 @@ SITE_PY_CONTENT = LIB_PYTHON.join('site.py').read() ERROR_TEXT = os.strerror(errno.ENOENT) +if os.name == 'nt': + py.test.skip('sandbox not supported on windows') + def assert_(cond, text): if not cond: print "assert failed:", text From noreply at buildbot.pypy.org Tue Mar 25 00:37:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 25 Mar 2014 00:37:37 +0100 (CET) Subject: [pypy-commit] pypy py3k: space.buffer was killed Message-ID: <20140324233737.AC3B21C0161@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70270:0581af1a5243 Date: 2014-03-24 16:17 -0700 http://bitbucket.org/pypy/pypy/changeset/0581af1a5243/ Log: space.buffer was killed diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -16,7 +16,7 @@ assert space.bufferstr_w(w_hello) == 'hello world' assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - e = space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) + e = 
space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) message = space.unwrap(e.value.get_w_value(space)) assert "'int' does not support the buffer interface" == message From noreply at buildbot.pypy.org Tue Mar 25 00:37:38 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 25 Mar 2014 00:37:38 +0100 (CET) Subject: [pypy-commit] pypy py3k: py3k still needs _check_released in buffer_w Message-ID: <20140324233738.EED711C0161@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70271:a0706792c660 Date: 2014-03-24 16:36 -0700 http://bitbucket.org/pypy/pypy/changeset/a0706792c660/ Log: py3k still needs _check_released in buffer_w diff --git a/pypy/objspace/std/memoryview.py b/pypy/objspace/std/memoryview.py --- a/pypy/objspace/std/memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -49,6 +49,7 @@ consistency, in PyPy memoryview DOES support buffer(), which means that it is accepted in more places than CPython. """ + self._check_released(space) return self.buf @staticmethod diff --git a/pypy/objspace/std/test/test_memoryview.py b/pypy/objspace/std/test/test_memoryview.py --- a/pypy/objspace/std/test/test_memoryview.py +++ b/pypy/objspace/std/test/test_memoryview.py @@ -116,4 +116,5 @@ v = memoryview(b"a"*100) with v as cm: assert cm is v + raises(ValueError, bytes, v) assert "released memory" in repr(v) From noreply at buildbot.pypy.org Tue Mar 25 00:44:53 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 25 Mar 2014 00:44:53 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20140324234453.1A47C1C0161@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70272:5cba21f3d2be Date: 2014-03-24 16:44 -0700 http://bitbucket.org/pypy/pypy/changeset/5cba21f3d2be/ Log: adapt to py3 diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -186,7 +186,7 @@ from array import array class subtype(array): pass - assert marshal.dumps(subtype('c', 'test')) == marshal.dumps(array('c', 'test')) + assert marshal.dumps(subtype('b', b'test')) == marshal.dumps(array('b', b'test')) def test_bad_typecode(self): import marshal From noreply at buildbot.pypy.org Tue Mar 25 08:56:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 08:56:01 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: A better attempt at fixing 31386d1544cb Message-ID: <20140325075601.7A7B71C309E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70273:0f519ccd4e65 Date: 2014-03-25 08:55 +0100 http://bitbucket.org/pypy/pypy/changeset/0f519ccd4e65/ Log: A better attempt at fixing 31386d1544cb diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -54,10 +54,10 @@ # meaningful. We use ebp, i.e. the word at offset +0, to store the # resume counter. 
-STM_RESUME_BUF_WORDS = 4 # <-- for alignment, it can't be 3 +STM_RESUME_BUF_WORDS = 4 STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_RESUME_BUF_WORDS STM_JMPBUF_OFS = WORD * FRAME_FIXED_SIZE STM_JMPBUF_OFS_RBP = STM_JMPBUF_OFS + 0 * WORD STM_JMPBUF_OFS_RIP = STM_JMPBUF_OFS + 1 * WORD STM_JMPBUF_OFS_RSP = STM_JMPBUF_OFS + 2 * WORD -# unused: STM_JMPBUF_OFS + 3 * WORD +STM_OLD_SHADOWSTACK = STM_JMPBUF_OFS + 3 * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -19,7 +19,8 @@ from rpython.jit.backend.x86.arch import ( FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, PASS_ON_MY_FRAME, STM_FRAME_FIXED_SIZE, STM_JMPBUF_OFS, - STM_JMPBUF_OFS_RIP, STM_JMPBUF_OFS_RSP, STM_JMPBUF_OFS_RBP) + STM_JMPBUF_OFS_RIP, STM_JMPBUF_OFS_RSP, STM_JMPBUF_OFS_RBP, + STM_OLD_SHADOWSTACK) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -158,11 +159,17 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: mc.MOV(ebx, self.heap_shadowstack_top()) + if self.cpu.gc_ll_descr.stm: + # STM: there is a restriction on updating the shadowstack + # in-place: never put young objects below what is the + # transaction start's shadowstack position! As we might + # have started a transction in the same frame with the + # same value of shadowstack as now, we have nowhere to put + # this new young jitframe object -- so we have to PUSH it. + mc.ADD_ri(ebx.value, WORD) + mc.MOV(self.heap_shadowstack_top(), ebx) + # mc.MOV_mr((self.SEGMENT_NO, ebx.value, -WORD), eax.value) - # STM note: this stores the updated jitframe object in the - # position -WORD, but (in this case) leaves the position - # -2*WORD untouched. This old jitframe object remains in - # the shadowstack just in case we do an abort later. mc.MOV_bi((self.SEGMENT_FRAME, gcmap_ofs), 0) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats) @@ -804,10 +811,8 @@ # to this frame, because we're about to leave. This is if # we called a pypy_stm_start_transaction() earlier. 
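[Note, not part of the changeset: a rough pure-Python model of the shadowstack discipline this patch introduces (see the "STM: there is a restriction..." comment above and the new STM_OLD_SHADOWSTACK slot in arch.py).  The call header remembers the old shadowstack top and then pushes the new young jitframe, because young objects must never be put below the shadowstack position the transaction started with; the call footer restores the saved top, which also pops any extra copies that realloc_frame pushed in between.  Names below are invented for illustration:]

    class ShadowStack(object):
        def __init__(self):
            self.items = []

        def call_header(self, new_frame):
            # remember the old top, then push: never overwrite an existing
            # slot in place with a freshly allocated (young) jitframe
            new_frame.saved_top = len(self.items)   # plays the STM_OLD_SHADOWSTACK
            self.items.append(new_frame)            # role (really a machine-frame word)

        def call_footer(self, frame):
            # realloc_frame may have pushed newer frames above the old one;
            # restoring the saved position drops them all at once
            del self.items[frame.saved_top:]

    class Frame(object):
        pass

    ss = ShadowStack()
    f = Frame()
    ss.call_header(f)           # entering the generated code
    ss.items.append(Frame())    # e.g. realloc_frame pushed a bigger copy on top
    ss.call_footer(f)           # leaving: back to the saved top
    assert ss.items == []
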
assert IS_X86_64 + mc = self.mc # - # load the shadowstack pointer into ebx (a callee-saved register) - mc = self.mc - mc.MOV(ebx, self.heap_shadowstack_top()) # load the address of the jmpbuf mc.LEA_rs(edi.value, STM_JMPBUF_OFS) # compare it with the currently-stored jmpbuf @@ -820,20 +825,17 @@ mc.MOV_ri(edi.value, rstm.adr_jit_default_msg) mc.CALL(imm(rstm.adr__stm_become_inevitable)) # there could have been a collection in _stm_become_inevitable; - # reload the frame into ebp - mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx.value, -WORD)) + # reload the frame into ebp (but we don't need to apply the + # write barrier to it now) + mc.MOV(ecx, self.heap_shadowstack_top()) + mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ecx.value, -WORD)) # # this is where the JNE above jumps offset = mc.get_relative_pos() - jne_location assert 0 < offset <= 127 mc.overwrite(jne_location-1, chr(offset)) - # - # now decrement ebx by 2*WORD and store it back, which will - # really decrement the shadowstack - mc.SUB_ri(ebx.value, 2 * WORD) - mc.MOV(self.heap_shadowstack_top(), ebx) - elif gcrootmap and gcrootmap.is_shadow_stack: + if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack() # the return value is the jitframe @@ -860,23 +862,20 @@ self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) # MOV [ebx], ebp if self.cpu.gc_ll_descr.stm: - # With stm, we push the jitframe twice on the shadowstack. - # If we break transaction inside this frame, we'll do it - # with only one item on the shadowstack, and then we'll - # duplicate it again. The point is to store both the - # old and the new copy when case we do a realloc_frame, - # just for the case where we later abort. - self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, WORD), ebp.value) - self.mc.ADD_ri(ebx.value, 2 * WORD) - else: - self.mc.ADD_ri(ebx.value, WORD) + self.mc.MOV_sr(STM_OLD_SHADOWSTACK, ebx.value) + self.mc.ADD_ri(ebx.value, WORD) self.mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx def _call_footer_shadowstack(self): - # SUB [rootstacktop], WORD (or 2 * WORD with STM) if self.cpu.gc_ll_descr.stm: - self.mc.SUB(self.heap_shadowstack_top(), 2 * WORD) + # STM: in the rare case where we need realloc_frame, the new + # frame is pushed on top of the old one. It's even possible + # that this occurs more than once. So we have to restore + # the old shadowstack by looking up its original saved value. + self.mc.MOV_rs(ecx.value, STM_OLD_SHADOWSTACK) + self.mc.MOV(self.heap_shadowstack_top(), ecx) else: + # SUB [rootstacktop], WORD self.mc.SUB(self.heap_shadowstack_top(), WORD) def redirect_call_assembler(self, oldlooptoken, newlooptoken): @@ -1148,10 +1147,7 @@ if gcrootmap.is_shadow_stack: mc.MOV(ecx, self.heap_shadowstack_top()) mc.MOV(ebp, mem(self.SEGMENT_NO, ecx, -WORD)) - self._reload_frame_wb(mc, align_stack) - - def _reload_frame_wb(self, mc, align_stack=False): - gcrootmap = self.cpu.gc_ll_descr.gcrootmap + # wbdescr = self.cpu.gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not @@ -2551,12 +2547,6 @@ # call stm_commit_transaction() mc.CALL(imm(rstm.adr_stm_commit_transaction)) # - # copy shadowstack[-1] into shadowstack[-2]: the latter is - # not going to be used any more, now that we committed - mc.MOV(ebx, self.heap_shadowstack_top()) - mc.MOV_rm(eax.value, (self.SEGMENT_NO, ebx.value, -WORD)) - mc.MOV_mr((self.SEGMENT_NO, ebx.value, -2 * WORD), eax.value) - # # update the two words in the STM_RESUME_BUF, as described # in arch.py. 
The "learip" pseudo-instruction turns into # what is, in gnu as syntax: lea 0(%rip), %rax (the 0 is @@ -2583,13 +2573,8 @@ mc.LEA_rs(esi.value, STM_JMPBUF_OFS_RBP) mc.CALL(imm(rstm.adr_pypy_stm_start_transaction)) # - # reload ebp with the frame now, picking the value from - # shadowstack[-2] and duplicating it into shadowstack[-1]. - # Only realloc_frame can make these values different again. - mc.MOV(ebx, self.heap_shadowstack_top()) - mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ebx.value, -2 * WORD)) - mc.MOV_mr((self.SEGMENT_NO, ebx.value, -WORD), ebp.value) - self._reload_frame_wb(self.mc) + # reload ebp with the frame now + self._reload_frame_if_necessary(self.mc) # # restore regs self._push_pop_regs_to_frame(False, mc, grp_regs, xmm_regs) From noreply at buildbot.pypy.org Tue Mar 25 09:27:22 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 25 Mar 2014 09:27:22 +0100 (CET) Subject: [pypy-commit] benchmarks single-run: Backed out changeset 7180631ee8db Message-ID: <20140325082722.DEEE21C066C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: single-run Changeset: r238:2e71651d7222 Date: 2014-03-24 21:25 +0200 http://bitbucket.org/pypy/benchmarks/changeset/2e71651d7222/ Log: Backed out changeset 7180631ee8db diff --git a/lib/pypy/include/pypy_decl.h b/lib/pypy/include/pypy_decl.h --- a/lib/pypy/include/pypy_decl.h +++ b/lib/pypy/include/pypy_decl.h @@ -513,17 +513,17 @@ PyAPI_DATA(PyTypeObject) PySlice_Type; PyAPI_DATA(PyObject*) PyExc_IOError; PyAPI_DATA(PyObject*) PyExc_RuntimeError; -PyAPI_DATA(PyObject*) PyExc_AttributeError; +PyAPI_DATA(PyObject*) PyExc_SystemError; PyAPI_DATA(PyObject*) PyExc_NameError; PyAPI_DATA(PyObject*) PyExc_MemoryError; PyAPI_DATA(PyObject*) PyExc_SystemExit; PyAPI_DATA(PyTypeObject) PyModule_Type; PyAPI_DATA(PyTypeObject) PyBaseObject_Type; -PyAPI_DATA(PyObject*) PyExc_FloatingPointError; -PyAPI_DATA(PyObject*) PyExc_UnicodeDecodeError; +PyAPI_DATA(PyObject*) PyExc_UnicodeTranslateError; +PyAPI_DATA(PyObject*) PyExc_UnicodeWarning; PyAPI_DATA(PyObject*) PyExc_Exception; PyAPI_DATA(PyObject*) PyExc_TypeError; -PyAPI_DATA(PyObject*) PyExc_SystemError; +PyAPI_DATA(PyObject*) PyExc_AttributeError; PyAPI_DATA(PyObject*) PyExc_ReferenceError; PyAPI_DATA(PyTypeObject) PyNotImplemented_Type; PyAPI_DATA(PyTypeObject) PySet_Type; @@ -555,14 +555,14 @@ PyAPI_DATA(PyObject*) PyExc_BytesWarning; PyAPI_DATA(PyObject*) PyExc_DeprecationWarning; PyAPI_DATA(PyObject*) PyExc_SyntaxError; -PyAPI_DATA(PyObject*) PyExc_UnicodeWarning; +PyAPI_DATA(PyObject*) PyExc_UnicodeDecodeError; PyAPI_DATA(PyObject*) PyExc_ZeroDivisionError; PyAPI_DATA(PyTypeObject) PyFloat_Type; +PyAPI_DATA(PyTypeObject) PyBaseString_Type; +PyAPI_DATA(PyObject) _Py_NoneStruct; +PyAPI_DATA(PyObject*) PyExc_GeneratorExit; +PyAPI_DATA(PyObject*) PyExc_AssertionError; PyAPI_DATA(PyObject*) PyExc_RuntimeWarning; -PyAPI_DATA(PyObject) _Py_NoneStruct; -PyAPI_DATA(PyObject*) PyExc_IndentationError; -PyAPI_DATA(PyObject*) PyExc_AssertionError; -PyAPI_DATA(PyObject*) PyExc_GeneratorExit; PyAPI_DATA(PyObject*) PyExc_ImportWarning; PyAPI_DATA(PyObject*) PyExc_UnicodeEncodeError; PyAPI_DATA(PyTypeObject) PyInt_Type; @@ -571,8 +571,8 @@ PyAPI_DATA(PyObject*) PyExc_OSError; PyAPI_DATA(PyObject*) PyExc_KeyError; PyAPI_DATA(PyObject*) PyExc_SyntaxWarning; -PyAPI_DATA(PyTypeObject) PyBaseString_Type; PyAPI_DATA(PyObject*) PyExc_StopIteration; +PyAPI_DATA(PyObject*) PyExc_IndentationError; PyAPI_DATA(PyObject*) PyExc_NotImplementedError; PyAPI_DATA(PyObject*) PyExc_ImportError; 
PyAPI_DATA(PyDateTime_CAPI*) PyDateTimeAPI; @@ -582,7 +582,7 @@ PyAPI_DATA(PyTypeObject) PyClass_Type; PyAPI_DATA(PyTypeObject) PyType_Type; PyAPI_DATA(PyTypeObject) PyMemoryView_Type; -PyAPI_DATA(PyObject*) PyExc_UnicodeTranslateError; +PyAPI_DATA(PyObject*) PyExc_FloatingPointError; PyAPI_DATA(PyObject*) PyExc_LookupError; PyAPI_DATA(PyObject*) PyExc_EOFError; PyAPI_DATA(PyObject*) PyExc_BufferError; diff --git a/lib/pypy/rpython/translator/c/src/signals.o b/lib/pypy/rpython/translator/c/src/signals.o index 08b3649ae1483b939bb048d11ecb051f62ffe15d..166016f96874014a34535131bd7cfed3aabe09eb GIT binary patch [cut] diff --git a/lib/pypy/rpython/translator/c/src/stacklet/stacklet.o b/lib/pypy/rpython/translator/c/src/stacklet/stacklet.o index 6dba4dd0d4a6480683502d1568c529dbaecf9174..e42497399f1f975b42e62fbf236498014c21f654 GIT binary patch [cut] diff --git a/lib/pypy/rpython/translator/c/src/thread.o b/lib/pypy/rpython/translator/c/src/thread.o index 06072cecd32b5f5afce76655ad919f8ff637714c..9b7dcc7cb6e9c6eb94216acf121fbcd5b32297e5 GIT binary patch [cut] diff --git a/multithread/multithread-richards.py b/multithread/multithread-richards.py deleted file mode 100755 --- a/multithread/multithread-richards.py +++ /dev/null @@ -1,458 +0,0 @@ -# based on a Java version: -# Based on original version written in BCPL by Dr Martin Richards -# in 1981 at Cambridge University Computer Laboratory, England -# and a C++ version derived from a Smalltalk version written by -# L Peter Deutsch. -# Java version: Copyright (C) 1995 Sun Microsystems, Inc. -# Translation from C++, Mario Wolczko -# Outer loop added by Alex Jacoby - -import thread, os -#from __pypy__.thread import atomic - - -# Task IDs -I_IDLE = 1 -I_WORK = 2 -I_HANDLERA = 3 -I_HANDLERB = 4 -I_DEVA = 5 -I_DEVB = 6 - -# Packet types -K_DEV = 1000 -K_WORK = 1001 - -# Packet - -BUFSIZE = 4 - -BUFSIZE_RANGE = range(BUFSIZE) - -class Packet(object): - def __init__(self,l,i,k): - self.link = l - self.ident = i - self.kind = k - self.datum = 0 - self.data = [0] * BUFSIZE - - def append_to(self,lst): - self.link = None - if lst is None: - return self - else: - p = lst - next = p.link - while next is not None: - p = next - next = p.link - p.link = self - return lst - -# Task Records - -class TaskRec(object): - pass - -class DeviceTaskRec(TaskRec): - def __init__(self): - self.pending = None - -class IdleTaskRec(TaskRec): - def __init__(self): - self.control = 1 - self.count = 10000 - -class HandlerTaskRec(TaskRec): - def __init__(self): - self.work_in = None - self.device_in = None - - def workInAdd(self,p): - self.work_in = p.append_to(self.work_in) - return self.work_in - - def deviceInAdd(self,p): - self.device_in = p.append_to(self.device_in) - return self.device_in - -class WorkerTaskRec(TaskRec): - def __init__(self): - self.destination = I_HANDLERA - self.count = 0 -# Task - -class TaskState(object): - def __init__(self): - self.packet_pending = True - self.task_waiting = False - self.task_holding = False - - def packetPending(self): - self.packet_pending = True - self.task_waiting = False - self.task_holding = False - return self - - def waiting(self): - self.packet_pending = False - self.task_waiting = True - self.task_holding = False - return self - - def running(self): - self.packet_pending = False - self.task_waiting = False - self.task_holding = False - return self - - def waitingWithPacket(self): - self.packet_pending = True - self.task_waiting = True - self.task_holding = False - return self - - def isPacketPending(self): - return 
self.packet_pending - - def isTaskWaiting(self): - return self.task_waiting - - def isTaskHolding(self): - return self.task_holding - - def isTaskHoldingOrWaiting(self): - return self.task_holding or (not self.packet_pending and self.task_waiting) - - def isWaitingWithPacket(self): - return self.packet_pending and self.task_waiting and not self.task_holding - - - - - -tracing = False -layout = 0 - -def trace(a): - global layout - layout -= 1 - if layout <= 0: - print - layout = 50 - print a, - - -TASKTABSIZE = 10 - -class TaskWorkArea(object): - def __init__(self): - self.taskTab = [None] * TASKTABSIZE - - self.taskList = None - - self.holdCount = 0 - self.qpktCount = 0 - -class Task(TaskState): - - - def __init__(self,i,p,w,initialState,r, taskWorkArea): - self.taskWorkArea = taskWorkArea - self.link = taskWorkArea.taskList - self.ident = i - self.priority = p - self.input = w - - self.packet_pending = initialState.isPacketPending() - self.task_waiting = initialState.isTaskWaiting() - self.task_holding = initialState.isTaskHolding() - - self.handle = r - - taskWorkArea.taskList = self - taskWorkArea.taskTab[i] = self - - def fn(self,pkt,r): - raise NotImplementedError - - - def addPacket(self,p,old): - if self.input is None: - self.input = p - self.packet_pending = True - if self.priority > old.priority: - return self - else: - p.append_to(self.input) - return old - - - def runTask(self): - if self.isWaitingWithPacket(): - msg = self.input - self.input = msg.link - if self.input is None: - self.running() - else: - self.packetPending() - else: - msg = None - - return self.fn(msg,self.handle) - - - def waitTask(self): - self.task_waiting = True - return self - - - def hold(self): - self.taskWorkArea.holdCount += 1 - self.task_holding = True - return self.link - - - def release(self,i): - t = self.findtcb(i) - t.task_holding = False - if t.priority > self.priority: - return t - else: - return self - - - def qpkt(self,pkt): - t = self.findtcb(pkt.ident) - self.taskWorkArea.qpktCount += 1 - pkt.link = None - pkt.ident = self.ident - return t.addPacket(pkt,self) - - - def findtcb(self,id): - t = self.taskWorkArea.taskTab[id] - if t is None: - raise Exception("Bad task id %d" % id) - return t - - -# DeviceTask - - -class DeviceTask(Task): - def __init__(self,i,p,w,s,r, taskWorkArea): - Task.__init__(self,i,p,w,s,r, taskWorkArea) - - def fn(self,pkt,r): - d = r - assert isinstance(d, DeviceTaskRec) - if pkt is None: - pkt = d.pending - if pkt is None: - return self.waitTask() - else: - d.pending = None - return self.qpkt(pkt) - else: - d.pending = pkt - if tracing: trace(pkt.datum) - return self.hold() - - - -class HandlerTask(Task): - def __init__(self,i,p,w,s,r, taskWorkArea): - Task.__init__(self,i,p,w,s,r, taskWorkArea) - - def fn(self,pkt,r): - h = r - assert isinstance(h, HandlerTaskRec) - if pkt is not None: - if pkt.kind == K_WORK: - h.workInAdd(pkt) - else: - h.deviceInAdd(pkt) - work = h.work_in - if work is None: - return self.waitTask() - count = work.datum - if count >= BUFSIZE: - h.work_in = work.link - return self.qpkt(work) - - dev = h.device_in - if dev is None: - return self.waitTask() - - h.device_in = dev.link - dev.datum = work.data[count] - work.datum = count + 1 - return self.qpkt(dev) - -# IdleTask - - -class IdleTask(Task): - def __init__(self,i,p,w,s,r, taskWorkArea): - Task.__init__(self,i,0,None,s,r, taskWorkArea) - - def fn(self,pkt,r): - i = r - assert isinstance(i, IdleTaskRec) - i.count -= 1 - if i.count == 0: - return self.hold() - elif i.control & 1 == 0: - 
i.control /= 2 - return self.release(I_DEVA) - else: - i.control = i.control/2 ^ 0xd008 - return self.release(I_DEVB) - - -# WorkTask - - -A = ord('A') - -class WorkTask(Task): - def __init__(self,i,p,w,s,r, taskWorkArea): - Task.__init__(self,i,p,w,s,r, taskWorkArea) - - def fn(self,pkt,r): - w = r - assert isinstance(w, WorkerTaskRec) - if pkt is None: - return self.waitTask() - - if w.destination == I_HANDLERA: - dest = I_HANDLERB - else: - dest = I_HANDLERA - - w.destination = dest - pkt.ident = dest - pkt.datum = 0 - - for i in BUFSIZE_RANGE: # xrange(BUFSIZE) - w.count += 1 - if w.count > 26: - w.count = 1 - pkt.data[i] = A + w.count - 1 - - return self.qpkt(pkt) - -try: - from time import time -except ImportError: - def time(): - return 0 - - -def schedule(taskWorkArea): - t = taskWorkArea.taskList - while t is not None: - pkt = None - - if tracing: - print "tcb =",t.ident - - if t.isTaskHoldingOrWaiting(): - t = t.link - else: - if tracing: trace(chr(ord("0")+t.ident)) - t = t.runTask() - -class Richards(object): - - def __init__(self): - self.finished_lock = thread.allocate_lock() - self.finished_lock.acquire() - self.taskWorkArea = TaskWorkArea() - - def run_and_unlock(self, to_do): - os.write(1, 'running...\n') - iterations = 0 - self.result = True - while 1: - try: - to_do.pop() - except IndexError: - break - iterations += 1 - self.result = self.run(self.taskWorkArea) - os.write(1, 'done, iterations=%d, result=%r\n' % (iterations, self.result)) - self.finished_lock.release() - - def run(self, taskWorkArea): - #with atomic: - if 1: - taskWorkArea.holdCount = 0 - taskWorkArea.qpktCount = 0 - - IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec(), - taskWorkArea) - - wkq = Packet(None, 0, K_WORK) - wkq = Packet(wkq , 0, K_WORK) - WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec(), - taskWorkArea) - - wkq = Packet(None, I_DEVA, K_DEV) - wkq = Packet(wkq , I_DEVA, K_DEV) - wkq = Packet(wkq , I_DEVA, K_DEV) - HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec(), - taskWorkArea) - - wkq = Packet(None, I_DEVB, K_DEV) - wkq = Packet(wkq , I_DEVB, K_DEV) - wkq = Packet(wkq , I_DEVB, K_DEV) - HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec(), - taskWorkArea) - - wkq = None; - DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec(), - taskWorkArea) - DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec(), - taskWorkArea) - - schedule(taskWorkArea) - - if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: - pass - else: - return False - - return True - -def entry_point(iterations, NUM_THREADS): - rlist = [Richards() for i in range(NUM_THREADS)] - to_do = [None] * iterations - startTime = time() - for r in rlist: - thread.start_new_thread(r.run_and_unlock, (to_do,)) - for r in rlist: - r.finished_lock.acquire() - endTime = time() - assert to_do == [] - result = all(r.result for r in rlist) - return result, startTime, endTime - -def main(entry_point = entry_point, iterations = 10, threads = 4): - print "Richards benchmark (Python) starting... [%r]" % entry_point - result, startTime, endTime = entry_point(iterations, threads) - if not result: - print "Incorrect results!" - return -1 - print "finished." 
- total_s = endTime - startTime - print "Total time for %d iterations: %.2f secs" %(iterations,total_s) - print "Average time per iteration: %.2f ms" %(total_s*1000/iterations) - return 42 - -if __name__ == '__main__': - import sys - main(iterations = int(sys.argv[1]), - threads = int(sys.argv[2])) diff --git a/unladen_swallow/performance/richards.py b/unladen_swallow/performance/richards.py --- a/unladen_swallow/performance/richards.py +++ b/unladen_swallow/performance/richards.py @@ -305,7 +305,7 @@ i.control /= 2 return self.release(I_DEVA) else: - i.control = (i.control/2) ^ 0xd008 + i.control = i.control/2 ^ 0xd008 return self.release(I_DEVB) From noreply at buildbot.pypy.org Tue Mar 25 09:27:24 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 25 Mar 2014 09:27:24 +0100 (CET) Subject: [pypy-commit] benchmarks default: add multithread richards Message-ID: <20140325082724.66D001C066C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r239:d505ab213546 Date: 2014-03-24 21:26 +0200 http://bitbucket.org/pypy/benchmarks/changeset/d505ab213546/ Log: add multithread richards diff --git a/multithread/multithread-richards.py b/multithread/multithread-richards.py new file mode 100755 --- /dev/null +++ b/multithread/multithread-richards.py @@ -0,0 +1,458 @@ +# based on a Java version: +# Based on original version written in BCPL by Dr Martin Richards +# in 1981 at Cambridge University Computer Laboratory, England +# and a C++ version derived from a Smalltalk version written by +# L Peter Deutsch. +# Java version: Copyright (C) 1995 Sun Microsystems, Inc. +# Translation from C++, Mario Wolczko +# Outer loop added by Alex Jacoby + +import thread, os +#from __pypy__.thread import atomic + + +# Task IDs +I_IDLE = 1 +I_WORK = 2 +I_HANDLERA = 3 +I_HANDLERB = 4 +I_DEVA = 5 +I_DEVB = 6 + +# Packet types +K_DEV = 1000 +K_WORK = 1001 + +# Packet + +BUFSIZE = 4 + +BUFSIZE_RANGE = range(BUFSIZE) + +class Packet(object): + def __init__(self,l,i,k): + self.link = l + self.ident = i + self.kind = k + self.datum = 0 + self.data = [0] * BUFSIZE + + def append_to(self,lst): + self.link = None + if lst is None: + return self + else: + p = lst + next = p.link + while next is not None: + p = next + next = p.link + p.link = self + return lst + +# Task Records + +class TaskRec(object): + pass + +class DeviceTaskRec(TaskRec): + def __init__(self): + self.pending = None + +class IdleTaskRec(TaskRec): + def __init__(self): + self.control = 1 + self.count = 10000 + +class HandlerTaskRec(TaskRec): + def __init__(self): + self.work_in = None + self.device_in = None + + def workInAdd(self,p): + self.work_in = p.append_to(self.work_in) + return self.work_in + + def deviceInAdd(self,p): + self.device_in = p.append_to(self.device_in) + return self.device_in + +class WorkerTaskRec(TaskRec): + def __init__(self): + self.destination = I_HANDLERA + self.count = 0 +# Task + +class TaskState(object): + def __init__(self): + self.packet_pending = True + self.task_waiting = False + self.task_holding = False + + def packetPending(self): + self.packet_pending = True + self.task_waiting = False + self.task_holding = False + return self + + def waiting(self): + self.packet_pending = False + self.task_waiting = True + self.task_holding = False + return self + + def running(self): + self.packet_pending = False + self.task_waiting = False + self.task_holding = False + return self + + def waitingWithPacket(self): + self.packet_pending = True + self.task_waiting = True + self.task_holding = False + return self + 
+ def isPacketPending(self): + return self.packet_pending + + def isTaskWaiting(self): + return self.task_waiting + + def isTaskHolding(self): + return self.task_holding + + def isTaskHoldingOrWaiting(self): + return self.task_holding or (not self.packet_pending and self.task_waiting) + + def isWaitingWithPacket(self): + return self.packet_pending and self.task_waiting and not self.task_holding + + + + + +tracing = False +layout = 0 + +def trace(a): + global layout + layout -= 1 + if layout <= 0: + print + layout = 50 + print a, + + +TASKTABSIZE = 10 + +class TaskWorkArea(object): + def __init__(self): + self.taskTab = [None] * TASKTABSIZE + + self.taskList = None + + self.holdCount = 0 + self.qpktCount = 0 + +class Task(TaskState): + + + def __init__(self,i,p,w,initialState,r, taskWorkArea): + self.taskWorkArea = taskWorkArea + self.link = taskWorkArea.taskList + self.ident = i + self.priority = p + self.input = w + + self.packet_pending = initialState.isPacketPending() + self.task_waiting = initialState.isTaskWaiting() + self.task_holding = initialState.isTaskHolding() + + self.handle = r + + taskWorkArea.taskList = self + taskWorkArea.taskTab[i] = self + + def fn(self,pkt,r): + raise NotImplementedError + + + def addPacket(self,p,old): + if self.input is None: + self.input = p + self.packet_pending = True + if self.priority > old.priority: + return self + else: + p.append_to(self.input) + return old + + + def runTask(self): + if self.isWaitingWithPacket(): + msg = self.input + self.input = msg.link + if self.input is None: + self.running() + else: + self.packetPending() + else: + msg = None + + return self.fn(msg,self.handle) + + + def waitTask(self): + self.task_waiting = True + return self + + + def hold(self): + self.taskWorkArea.holdCount += 1 + self.task_holding = True + return self.link + + + def release(self,i): + t = self.findtcb(i) + t.task_holding = False + if t.priority > self.priority: + return t + else: + return self + + + def qpkt(self,pkt): + t = self.findtcb(pkt.ident) + self.taskWorkArea.qpktCount += 1 + pkt.link = None + pkt.ident = self.ident + return t.addPacket(pkt,self) + + + def findtcb(self,id): + t = self.taskWorkArea.taskTab[id] + if t is None: + raise Exception("Bad task id %d" % id) + return t + + +# DeviceTask + + +class DeviceTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,p,w,s,r, taskWorkArea) + + def fn(self,pkt,r): + d = r + assert isinstance(d, DeviceTaskRec) + if pkt is None: + pkt = d.pending + if pkt is None: + return self.waitTask() + else: + d.pending = None + return self.qpkt(pkt) + else: + d.pending = pkt + if tracing: trace(pkt.datum) + return self.hold() + + + +class HandlerTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,p,w,s,r, taskWorkArea) + + def fn(self,pkt,r): + h = r + assert isinstance(h, HandlerTaskRec) + if pkt is not None: + if pkt.kind == K_WORK: + h.workInAdd(pkt) + else: + h.deviceInAdd(pkt) + work = h.work_in + if work is None: + return self.waitTask() + count = work.datum + if count >= BUFSIZE: + h.work_in = work.link + return self.qpkt(work) + + dev = h.device_in + if dev is None: + return self.waitTask() + + h.device_in = dev.link + dev.datum = work.data[count] + work.datum = count + 1 + return self.qpkt(dev) + +# IdleTask + + +class IdleTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,0,None,s,r, taskWorkArea) + + def fn(self,pkt,r): + i = r + assert isinstance(i, IdleTaskRec) + i.count -= 1 + if i.count == 0: + return 
self.hold() + elif i.control & 1 == 0: + i.control /= 2 + return self.release(I_DEVA) + else: + i.control = i.control/2 ^ 0xd008 + return self.release(I_DEVB) + + +# WorkTask + + +A = ord('A') + +class WorkTask(Task): + def __init__(self,i,p,w,s,r, taskWorkArea): + Task.__init__(self,i,p,w,s,r, taskWorkArea) + + def fn(self,pkt,r): + w = r + assert isinstance(w, WorkerTaskRec) + if pkt is None: + return self.waitTask() + + if w.destination == I_HANDLERA: + dest = I_HANDLERB + else: + dest = I_HANDLERA + + w.destination = dest + pkt.ident = dest + pkt.datum = 0 + + for i in BUFSIZE_RANGE: # xrange(BUFSIZE) + w.count += 1 + if w.count > 26: + w.count = 1 + pkt.data[i] = A + w.count - 1 + + return self.qpkt(pkt) + +try: + from time import time +except ImportError: + def time(): + return 0 + + +def schedule(taskWorkArea): + t = taskWorkArea.taskList + while t is not None: + pkt = None + + if tracing: + print "tcb =",t.ident + + if t.isTaskHoldingOrWaiting(): + t = t.link + else: + if tracing: trace(chr(ord("0")+t.ident)) + t = t.runTask() + +class Richards(object): + + def __init__(self): + self.finished_lock = thread.allocate_lock() + self.finished_lock.acquire() + self.taskWorkArea = TaskWorkArea() + + def run_and_unlock(self, to_do): + os.write(1, 'running...\n') + iterations = 0 + self.result = True + while 1: + try: + to_do.pop() + except IndexError: + break + iterations += 1 + self.result = self.run(self.taskWorkArea) + os.write(1, 'done, iterations=%d, result=%r\n' % (iterations, self.result)) + self.finished_lock.release() + + def run(self, taskWorkArea): + #with atomic: + if 1: + taskWorkArea.holdCount = 0 + taskWorkArea.qpktCount = 0 + + IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec(), + taskWorkArea) + + wkq = Packet(None, 0, K_WORK) + wkq = Packet(wkq , 0, K_WORK) + WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec(), + taskWorkArea) + + wkq = Packet(None, I_DEVA, K_DEV) + wkq = Packet(wkq , I_DEVA, K_DEV) + wkq = Packet(wkq , I_DEVA, K_DEV) + HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec(), + taskWorkArea) + + wkq = Packet(None, I_DEVB, K_DEV) + wkq = Packet(wkq , I_DEVB, K_DEV) + wkq = Packet(wkq , I_DEVB, K_DEV) + HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec(), + taskWorkArea) + + wkq = None; + DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec(), + taskWorkArea) + DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec(), + taskWorkArea) + + schedule(taskWorkArea) + + if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: + pass + else: + return False + + return True + +def entry_point(iterations, NUM_THREADS): + rlist = [Richards() for i in range(NUM_THREADS)] + to_do = [None] * iterations + startTime = time() + for r in rlist: + thread.start_new_thread(r.run_and_unlock, (to_do,)) + for r in rlist: + r.finished_lock.acquire() + endTime = time() + assert to_do == [] + result = all(r.result for r in rlist) + return result, startTime, endTime + +def main(entry_point = entry_point, iterations = 10, threads = 4): + print "Richards benchmark (Python) starting... [%r]" % entry_point + result, startTime, endTime = entry_point(iterations, threads) + if not result: + print "Incorrect results!" + return -1 + print "finished." 
+ total_s = endTime - startTime + print "Total time for %d iterations: %.2f secs" %(iterations,total_s) + print "Average time per iteration: %.2f ms" %(total_s*1000/iterations) + return 42 + +if __name__ == '__main__': + import sys + main(iterations = int(sys.argv[1]), + threads = int(sys.argv[2])) From noreply at buildbot.pypy.org Tue Mar 25 09:33:08 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 25 Mar 2014 09:33:08 +0100 (CET) Subject: [pypy-commit] benchmarks default: add some multithreaded benchmarks and a script to run them (optional) Message-ID: <20140325083308.ADE601C309D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r240:06ac9ee33205 Date: 2014-03-25 09:33 +0100 http://bitbucket.org/pypy/benchmarks/changeset/06ac9ee33205/ Log: add some multithreaded benchmarks and a script to run them (optional) diff --git a/multithread/bench.py b/multithread/bench.py new file mode 100644 --- /dev/null +++ b/multithread/bench.py @@ -0,0 +1,116 @@ +#!/usr/bin/python + +import time +import math +import imp, os, sys +import json +import contextlib + +def import_file(filepath): + mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1]) + return imp.load_source(mod_name, filepath) + + +class DummyFile(object): + def write(self, x): pass + + at contextlib.contextmanager +def nostdout(): + save_stdout = sys.stdout + sys.stdout = DummyFile() + yield + sys.stdout = save_stdout + + +def avg(xs): + return sum(xs) / len(xs) + +def std_dev(xs): + N = len(xs) + mu = avg(xs) + var = sum([(x - mu)**2 for x in xs]) / N + return math.sqrt(var) + +def get_error(times): + ts = sorted(times)[:args.k] + best = float(ts[0]) + + return max((t / best) - 1.0 for t in ts) + +def within_error(args, times): + return get_error(times) < args.error + +def main(args): + basedir = os.path.abspath(os.path.dirname(__file__)) + sys.path.insert(0, basedir+'/') + import common + print __file__ + folder = os.path.dirname(args.file) + os.chdir(folder) + sys.path.insert(0, os.path.abspath('.')) + test = import_file(os.path.basename(args.file)) + + times = [] + k = 1 + try: + while True: + time.sleep(0.2) + if not args.q: + print "Run {}/{}:".format(k, args.k) + + test_time = time.time() + if args.p: + test.run(*args.more) + else: + with nostdout(): + test.run(*args.more) + times.append(time.time() - test_time) + + if not args.q: + print "took {} s".format(times[-1]) + + if k >= args.k: + if within_error(args, times): + break + elif not args.q: + print "error was not within", args.error + + if k > 2 * args.k: + if not args.q: + print "max number of iterations reached", \ + "error still too great, finish anyway" + break + k += 1 + finally: + if not args.q: + print "times:", times + + if times: + times = sorted(times)[:args.k] + result = {'best':min(times), + 'error':get_error(times), + 'std_dev(k)':std_dev(times)} + print json.dumps(result) + + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('-k', default=3, help='K-best K', type=int) + parser.add_argument('-e', '--error', default=0.05, type=float, + help='relative allowed error [0.05]') + parser.add_argument('-q', action='store_const', + const=True, default=False, + help='mute except for best run') + parser.add_argument('-p', action='store_const', + const=True, default=False, + help='print to stdout what the benchmark prints') + parser.add_argument('file', help='file to run') + parser.add_argument('more', nargs="*", help='file.run() arguments') + + args = parser.parse_args() + if 
not args.q: + print args + main(args) diff --git a/multithread/common/__init__.py b/multithread/common/__init__.py new file mode 100644 diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py new file mode 100644 --- /dev/null +++ b/multithread/common/abstract_threading.py @@ -0,0 +1,119 @@ +from Queue import Queue, Empty, Full +from threading import Thread, Condition, Lock +import thread + +try: + from __pypy__.thread import atomic +except ImportError: + atomic = Lock() + +class Worker(Thread): + """Thread executing tasks from a given tasks queue""" + def __init__(self, queue): + Thread.__init__(self) + self.daemon = True + self.next_task = None + self.cond = Condition() + self.queue = queue + self.start() + + def run(self): + # the next line registers the at_commit_cb on interpreter + # level for this thread. This should be fixed in the + # interpreter (it causes a conflict in stmgcintf.register_at_commit_cb). + # thread.at_commit(lambda : 0, ()) + + while True: + with self.cond: + while self.next_task is None: + self.cond.wait() + + func, args, kargs = self.next_task + self.next_task = None + + try: + func(*args, **kargs) + except Exception as e: + print e + + # first time put in queue by threadpool on creation + try: + self.queue.put_nowait(self) + except Full: + # thread limit reached, I'll show myself out.. + return + + +class ThreadPool(object): + def __init__(self, thread_queue_size=12): + self.threads = Queue(thread_queue_size) + + def add_task(self, func, *args, **kargs): + try: + worker = self.threads.get_nowait() + except Empty: + worker = Worker(self.threads) + + with worker.cond: + worker.next_task = (func, args, kargs) + worker.cond.notify_all() + + + + +import multiprocessing +_thread_pool = ThreadPool(3 * multiprocessing.cpu_count()) + + + + +class Future(object): + def __init__(self, func, *args, **kwargs): + self._done = False + self._result = None + self._exception = None + self._cond = Condition() + + assert hasattr(func, "__call__") + + _thread_pool.add_task(self._task, func, *args, **kwargs) + + + def _task(self, func, *args, **kwargs): + with self._cond: + try: + self._result = func(*args, **kwargs) + except Exception as e: + self._exception = e + finally: + self._done = True + # several points/threads in the program + # may wait for the result (notify_all): + self._cond.notify_all() + + + def __call__(self): + with self._cond: + while not self._done: + self._cond.wait() + + if self._exception: + raise self._exception + + return self._result + + + +class AtomicFuture(Future): + def _task(self, func, *args, **kwargs): + with self._cond: + try: + with atomic: + self._result = func(*args, **kwargs) + except Exception as e: + self._exception = e + finally: + self._done = True + # several points/threads in the program + # may wait for the result (notify_all): + self._cond.notify_all() diff --git a/multithread/mandelbrot/mandelbrot.py b/multithread/mandelbrot/mandelbrot.py new file mode 100644 --- /dev/null +++ b/multithread/mandelbrot/mandelbrot.py @@ -0,0 +1,80 @@ +from common.abstract_threading import Future, atomic +import Image, sys + + +def calculate(a, b, im_size, max_iter=255): + print "a:%s, b:%s, im_size:%s" % (a, b, im_size) + ar, ai = a + br, bi = b + width, height = im_size + imag_step = (bi - ai) / (height - 1) + real_step = (br - ar) / (width - 1) + print "real/width:%s, imag/height:%s" % (real_step, imag_step) + + with atomic: + result = [[0] * width for y in xrange(height)] + for y in xrange(height): + zi = ai + y 
* imag_step + for x in xrange(width): + zr = ar + x * real_step + z = complex(zr, zi) + c = z + for i in xrange(max_iter): + if abs(z) > 2.0: + break + z = z * z + c + result[y][x] = i + + return result + +def save_img(image, file_name='out.png'): + im = Image.new("RGB", (len(image[0]), len(image))) + out = im.load() + + for y in xrange(len(image)): + for x in xrange(len(image[0])): + c = image[y][x] + out[x,y] = c, c, c + im.save(file_name, 'PNG') + +def save_to_file(image, file_name='out.txt'): + with atomic: + s = "\n".join(map(str, image)) + with open(file_name, 'w') as f: + f.write(s) + + +def merge_imgs(imgs): + res = [] + for img in imgs: + for y in img: + res.append(y) + return res + + +def run(threads=2): + threads = int(threads) + ar, ai = -2.0, -1.5 + br, bi = 1.0, 1.5 + width, height = 4096, 4096 + + step = (bi - ai) / threads + res = [] + ai = -1.5 + bi = ai + step + for i in xrange(threads): + res.append(Future(calculate, + a=(ar, ai + i * step), + b=(br, bi + i * step), + im_size=(width, int(height / threads)) + )) + + res = [f() for f in res] + return merge_imgs(res) + + + +if __name__ == '__main__': + image = run(int(sys.argv[1])) + save_to_file(image) + # save_img(image) don't run on STM, allocates 4000GB of memory diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py new file mode 100644 --- /dev/null +++ b/multithread/raytrace/raytrace.py @@ -0,0 +1,190 @@ +# From http://www.reddit.com/r/tinycode/comments/169ri9/ray_tracer_in_140_sloc_of_python_with_picture/ +# Date: 14.03.2013 + +from math import sqrt, pow, pi +from common.abstract_threading import atomic, Future +import time + +AMBIENT = 0.1 + + + +class Vector(object): + def __init__(self,x,y,z): + self.x = x + self.y = y + self.z = z + + def dot(self, b): + return self.x*b.x + self.y*b.y + self.z*b.z + + def cross(self, b): + return (self.y*b.z-self.z*b.y, self.z*b.x-self.x*b.z, self.x*b.y-self.y*b.x) + + def magnitude(self): + return sqrt(self.x*self.x+self.y*self.y+self.z*self.z) + + def normal(self): + mag = self.magnitude() + return Vector(self.x/mag,self.y/mag,self.z/mag) + + def __add__(self, b): + return Vector(self.x + b.x, self.y+b.y, self.z+b.z) + + def __sub__(self, b): + return Vector(self.x-b.x, self.y-b.y, self.z-b.z) + + def __mul__(self, b): + #assert type(b) == float or type(b) == int + return Vector(self.x*b, self.y*b, self.z*b) + + +class Sphere(object): + def __init__(self, center, radius, color): + self.c = center + self.r = radius + self.col = color + + def intersection(self, l): + q = l.d.dot(l.o - self.c)**2 - (l.o - self.c).dot(l.o - self.c) + self.r**2 + if q < 0: + return Intersection( Vector(0,0,0), -1, Vector(0,0,0), self) + else: + d = -l.d.dot(l.o - self.c) + d1 = d - sqrt(q) + d2 = d + sqrt(q) + if 0 < d1 and ( d1 < d2 or d2 < 0): + return Intersection(l.o+l.d*d1, d1, self.normal(l.o+l.d*d1), self) + elif 0 < d2 and ( d2 < d1 or d1 < 0): + return Intersection(l.o+l.d*d2, d2, self.normal(l.o+l.d*d2), self) + else: + return Intersection( Vector(0,0,0), -1, Vector(0,0,0), self) + + def normal(self, b): + return (b - self.c).normal() + + +class Plane(object): + def __init__(self, point, normal, color): + self.n = normal + self.p = point + self.col = color + + def intersection(self, l): + d = l.d.dot(self.n) + if d == 0: + return Intersection( Vector(0,0,0), -1, Vector(0,0,0), self) + else: + d = (self.p - l.o).dot(self.n) / d + return Intersection(l.o+l.d*d, d, self.n, self) + + +class Ray(object): + def __init__(self, origin, direction): + self.o = origin + 
self.d = direction + + +class Intersection(object): + def __init__(self, point, distance, normal, obj): + self.p = point + self.d = distance + self.n = normal + self.obj = obj + + +def testRay(ray, objects, ignore=None): + intersect = Intersection( Vector(0,0,0), -1, Vector(0,0,0), None) + + for obj in objects: + if obj is not ignore: + currentIntersect = obj.intersection(ray) + if currentIntersect.d > 0 and intersect.d < 0: + intersect = currentIntersect + elif 0 < currentIntersect.d < intersect.d: + intersect = currentIntersect + return intersect + + +def trace(ray, objects, light, maxRecur): + if maxRecur < 0: + return (0,0,0) + intersect = testRay(ray, objects) + if intersect.d == -1: + col = Vector(AMBIENT,AMBIENT,AMBIENT) + elif intersect.n.dot(light - intersect.p) < 0: + col = intersect.obj.col * AMBIENT + else: + lightRay = Ray(intersect.p, (light-intersect.p).normal()) + if testRay(lightRay, objects, intersect.obj).d == -1: + lightIntensity = 1000.0/(4*pi*(light-intersect.p).magnitude()**2) + col = intersect.obj.col * max(intersect.n.normal().dot((light - intersect.p).normal()*lightIntensity), AMBIENT) + else: + col = intersect.obj.col * AMBIENT + return col + + + +tasks = 0 +def task(x, h, cameraPos, objs, lightSource): + # force a transaction break here (STM not yet smart enough + # to figure out that it should break here) + time.sleep(0) + + with atomic: + for y in range(h): + ray = Ray(cameraPos, + (Vector(x/50.0-5,y/50.0-5,0)-cameraPos).normal()) + trace(ray, objs, lightSource, 10) + + # force a transaction break. updating a global var should + # be done in a separate transaction: + time.sleep(0) + + global tasks + with atomic: + tasks -= 1 + time.sleep(0) + +futures = [] +def future_dispatcher(ths, *args): + global tasks + + while tasks >= ths: + time.sleep(0) + + with atomic: + tasks += 1 + + futures.append(Future(task, *args)) + time.sleep(0) + + + + +def run(ths=8, w=1024, h=1024): + ths = int(ths) + w = int(w) + h = int(h) + + objs = [] + objs.append(Sphere( Vector(-2,0,-10), 2, Vector(0,255,0))) + objs.append(Sphere( Vector(2,0,-10), 3.5, Vector(255,0,0))) + objs.append(Sphere( Vector(0,-4,-10), 3, Vector(0,0,255))) + objs.append(Plane( Vector(0,0,-12), Vector(0,0,1), Vector(255,255,255))) + lightSource = Vector(0,10,0) + + cameraPos = Vector(0,0,20) + + for x in range(w): + print x + future_dispatcher(ths, x, h, cameraPos, objs, lightSource) + + for f in futures: + f() + del futures[:] + assert tasks == 0 + + +if __name__ == '__main__': + run() From noreply at buildbot.pypy.org Tue Mar 25 09:53:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 09:53:00 +0100 (CET) Subject: [pypy-commit] stmgc default: Failing test Message-ID: <20140325085300.29ACC1C309D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1091:a51ebf0218a9 Date: 2014-03-25 09:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/a51ebf0218a9/ Log: Failing test diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -338,3 +338,22 @@ self.switch(1) lp1 = self.pop_root() assert stm_get_weakref(lp1) == lp0 + + def test_weakref_bug2(self): + self.start_transaction() + lp0 = stm_allocate(16) + self.push_root(lp0) + self.commit_transaction() + # + self.start_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + stm_write(lp0) # privatize page + lp1 = stm_allocate_weakref(lp0) # young object + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() # overflow object + self.push_root(lp1) 
+ # + self.switch(1) + stm_major_collect() From noreply at buildbot.pypy.org Tue Mar 25 10:01:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 10:01:27 +0100 (CET) Subject: [pypy-commit] stmgc default: Improve the test so that it crashes more reliably (this includes Message-ID: <20140325090127.41EBC1C309D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1092:d8fe314589b8 Date: 2014-03-25 10:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/d8fe314589b8/ Log: Improve the test so that it crashes more reliably (this includes older revisions now) diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -340,20 +340,23 @@ assert stm_get_weakref(lp1) == lp0 def test_weakref_bug2(self): - self.start_transaction() - lp0 = stm_allocate(16) - self.push_root(lp0) - self.commit_transaction() + def make_wr(): + self.start_transaction() + lp0 = stm_allocate(16) + self.push_root(lp0) + self.commit_transaction() + # + self.start_transaction() + lp0 = self.pop_root() + self.push_root(lp0) + stm_write(lp0) # privatize page + lp1 = stm_allocate_weakref(lp0) # young object + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() # overflow object + self.push_root(lp1) # - self.start_transaction() - lp0 = self.pop_root() - self.push_root(lp0) - stm_write(lp0) # privatize page - lp1 = stm_allocate_weakref(lp0) # young object - self.push_root(lp1) - stm_minor_collect() - lp1 = self.pop_root() # overflow object - self.push_root(lp1) - # + make_wr() self.switch(1) + make_wr() stm_major_collect() From noreply at buildbot.pypy.org Tue Mar 25 10:02:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 10:02:48 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix for d8fe314589b8 Message-ID: <20140325090248.8FC481C309D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1093:5575626c8253 Date: 2014-03-25 10:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/5575626c8253/ Log: Fix for d8fe314589b8 diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c --- a/c7/stm/weakref.c +++ b/c7/stm/weakref.c @@ -126,7 +126,7 @@ ssize_t size = 16; stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); - char *real_wr = REAL_ADDRESS(stm_object_pages, wr); + char *real_wr = REAL_ADDRESS(pseg->pub.segment_base, wr); object_t *pointing_to = *(object_t **)real_wr; assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { From noreply at buildbot.pypy.org Tue Mar 25 10:03:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 10:03:56 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/5575626c8253 Message-ID: <20140325090356.81BEB1C309D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70274:a8d2c3d28939 Date: 2014-03-25 10:03 +0100 http://bitbucket.org/pypy/pypy/changeset/a8d2c3d28939/ Log: import stmgc/5575626c8253 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -510720112e4c +5575626c8253 diff --git a/rpython/translator/stm/src_stm/stm/weakref.c b/rpython/translator/stm/src_stm/stm/weakref.c --- a/rpython/translator/stm/src_stm/stm/weakref.c +++ b/rpython/translator/stm/src_stm/stm/weakref.c @@ -127,7 +127,7 @@ ssize_t size = 16; stm_char *wr = (stm_char *)WEAKREF_PTR(weakref, size); - char *real_wr = REAL_ADDRESS(stm_object_pages, wr); + char *real_wr = 
REAL_ADDRESS(pseg->pub.segment_base, wr); object_t *pointing_to = *(object_t **)real_wr; assert(pointing_to != NULL); if (!mark_visited_test(pointing_to)) { From noreply at buildbot.pypy.org Tue Mar 25 11:46:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 11:46:44 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Missing transactionsafe. Add a no_collect at a place where it used Message-ID: <20140325104644.A22A11C066C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70275:b832e861aef4 Date: 2014-03-25 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/b832e861aef4/ Log: Missing transactionsafe. Add a no_collect at a place where it used to collect (because of stm_become_inevitable()). diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.debug import ll_assert from rpython.rlib.objectmodel import enforceargs -from rpython.rlib.rgc import stm_is_enabled +from rpython.rlib import rgc SIZEOFSIGNED = rffi.sizeof(lltype.Signed) IS_32BIT = (SIZEOFSIGNED == 4) @@ -15,10 +15,11 @@ GCMAP = lltype.Array(lltype.Unsigned) NULLGCMAP = lltype.nullptr(GCMAP) + at rgc.no_collect @enforceargs(None, int, int) def jitframeinfo_update_depth(jfi, base_ofs, new_depth): # - if stm_is_enabled(): + if rgc.stm_is_enabled(): from rpython.rlib.atomic_ops import bool_cas # careful here, 'jfi' has 'stm_dont_track_raw_accesses' while True: diff --git a/rpython/rlib/atomic_ops.py b/rpython/rlib/atomic_ops.py --- a/rpython/rlib/atomic_ops.py +++ b/rpython/rlib/atomic_ops.py @@ -16,8 +16,10 @@ bool_cas = rffi.llexternal('pypy_bool_cas', [llmemory.Address]*3, lltype.Bool, - compilation_info=eci, macro=True, _nowrapper=True) + compilation_info=eci, macro=True, _nowrapper=True, + transactionsafe=True) fetch_and_add = rffi.llexternal('pypy_fetch_and_add', [llmemory.Address, lltype.Signed], lltype.Signed, compilation_info=eci, - macro=True, _nowrapper=True) + macro=True, _nowrapper=True, + transactionsafe=True) From noreply at buildbot.pypy.org Tue Mar 25 14:17:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 14:17:12 +0100 (CET) Subject: [pypy-commit] stmgc default: Add an extra argument to stm_become_inevitable() just for testing. Message-ID: <20140325131712.5D3061C066C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1094:4024e96d9ba2 Date: 2014-03-25 14:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/4024e96d9ba2/ Log: Add an extra argument to stm_become_inevitable() just for testing. 
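[Note, not part of the changeset: the diff that follows threads a stm_thread_local_t pointer through stm_become_inevitable() and stm_become_globally_unique_transaction(), so the inline wrapper can assert that the caller passed the thread-local of the thread actually running the segment.  A small Python model of that fast path, with simplified stand-in classes rather than the real data structures:]

    class Segment(object):
        def __init__(self, running_thread):
            self.running_thread = running_thread
            self.jmpbuf_ptr = object()          # not None: still abortable

    def _stm_become_inevitable(segment, msg):   # stand-in for the slow path
        segment.jmpbuf_ptr = None               # (grossly simplified)

    def stm_become_inevitable(segment, tl, msg):
        # the new argument exists so this assert can catch a caller that
        # hands in some other thread's thread-local
        assert segment.running_thread is tl
        if segment.jmpbuf_ptr is not None:
            _stm_become_inevitable(segment, msg)

    me = object()
    seg = Segment(running_thread=me)
    stm_become_inevitable(seg, me, "why")       # ok
    # stm_become_inevitable(seg, object(), "why") would trip the assert
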
diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -285,12 +285,12 @@ return (objptr_t)-1; // break current } else if (get_rand(20) == 1) { push_roots(); - stm_become_inevitable("please"); + stm_become_inevitable(&stm_thread_local, "please"); pop_roots(); return NULL; } else if (get_rand(240) == 1) { push_roots(); - stm_become_globally_unique_transaction("really"); + stm_become_globally_unique_transaction(&stm_thread_local, "really"); fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num); pop_roots(); return NULL; diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -676,9 +676,10 @@ s_mutex_unlock(); } -void stm_become_globally_unique_transaction(const char *msg) +void stm_become_globally_unique_transaction(stm_thread_local_t *tl, + const char *msg) { - stm_become_inevitable(msg); /* may still abort */ + stm_become_inevitable(tl, msg); /* may still abort */ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE); diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -60,7 +60,7 @@ bool was_in_transaction = _stm_in_transaction(this_tl); if (was_in_transaction) { - stm_become_inevitable("fork"); + stm_become_inevitable(this_tl, "fork"); /* Note that the line above can still fail and abort, which should be fine */ } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -275,7 +275,9 @@ /* Turn the current transaction inevitable. The 'jmpbuf' passed to STM_START_TRANSACTION() is not going to be used any more after this call (but the stm_become_inevitable() itself may still abort). */ -static inline void stm_become_inevitable(const char* msg) { +static inline void stm_become_inevitable(stm_thread_local_t *tl, + const char* msg) { + assert(STM_SEGMENT->running_thread == tl); if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } @@ -330,7 +332,8 @@ transaction is running concurrently. Avoid as much as possible. Other transactions will continue running only after this transaction commits. 
*/ -void stm_become_globally_unique_transaction(const char *msg); +void stm_become_globally_unique_transaction(stm_thread_local_t *tl, + const char *msg); /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -53,8 +53,8 @@ void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); bool _check_commit_transaction(void); bool _check_abort_transaction(void); -bool _check_become_inevitable(void); -bool _check_become_globally_unique_transaction(void); +bool _check_become_inevitable(stm_thread_local_t *tl); +bool _check_become_globally_unique_transaction(stm_thread_local_t *tl); int stm_is_inevitable(void); void _set_type_id(object_t *obj, uint32_t h); @@ -158,12 +158,12 @@ CHECKED(stm_abort_transaction()); } -bool _check_become_inevitable() { - CHECKED(stm_become_inevitable("TEST")); +bool _check_become_inevitable(stm_thread_local_t *tl) { + CHECKED(stm_become_inevitable(tl, "TEST")); } -bool _check_become_globally_unique_transaction() { - CHECKED(stm_become_globally_unique_transaction("TESTGUT")); +bool _check_become_globally_unique_transaction(stm_thread_local_t *tl) { + CHECKED(stm_become_globally_unique_transaction(tl, "TESTGUT")); } #undef CHECKED @@ -358,14 +358,6 @@ if lib._check_stop_safe_point(): raise Conflict() -def stm_become_inevitable(): - if lib._check_become_inevitable(): - raise Conflict() - -def stm_become_globally_unique_transaction(): - if lib._check_become_globally_unique_transaction(): - raise Conflict() - def stm_minor_collect(): lib.stm_collect(0) @@ -515,3 +507,13 @@ def set_thread_local_obj(self, newobj): tl = self.tls[self.current_thread] tl.thread_local_obj = newobj + + def become_inevitable(self): + tl = self.tls[self.current_thread] + if lib._check_become_inevitable(tl): + raise Conflict() + + def become_globally_unique_transaction(self): + tl = self.tls[self.current_thread] + if lib._check_become_globally_unique_transaction(tl): + raise Conflict() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -383,7 +383,7 @@ stm_write(lp1) stm_set_char(lp1, 'b') assert lib.stm_is_inevitable() == 0 - stm_become_inevitable() + self.become_inevitable() assert lib.stm_is_inevitable() == 1 self.commit_transaction() # diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py --- a/c7/test/test_extra.py +++ b/c7/test/test_extra.py @@ -86,7 +86,7 @@ # self.switch(1) self.start_transaction() - lib._check_become_globally_unique_transaction() + self.become_globally_unique_transaction() assert lib.stm_is_inevitable() # py.test.raises(Conflict, self.switch, 0) diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -346,7 +346,7 @@ thread_state.push_roots(ex) ex.do(raising_call(trs.check_must_abort(), - "stm_become_inevitable")) + "self.become_inevitable")) if trs.check_must_abort(): thread_state.abort_transaction() else: diff --git a/c7/test/test_weakref.py b/c7/test/test_weakref.py --- a/c7/test/test_weakref.py +++ b/c7/test/test_weakref.py @@ -176,7 +176,7 @@ # back in thread 0, we pop toref from the shadowstack # in an inevitable transaction self.start_transaction() - stm_become_inevitable() + self.become_inevitable() self.pop_root() # forget toref stm_major_collect() diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -126,7 +126,7 @@ } _du_save1(lst); - stm_become_inevitable("print"); + 
stm_become_inevitable(&stm_thread_local, "print"); _du_restore1(lst); int i; diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -95,8 +95,12 @@ _stm_tloc = NULL; if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); } -inline static void stm_become_inevitable(const char *msg) { } +inline static void stm_become_inevitable( + stm_thread_local_t *tl, const char *msg) { } inline static void _stm_become_inevitable(const char *msg) { } +inline static void stm_become_globally_unique_transaction( + stm_thread_local_t *tl, const char *msg) { } + static inline int stm_is_inevitable(void) { return 1; } inline static void stm_read(object_t *ob) { } From noreply at buildbot.pypy.org Tue Mar 25 14:17:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 14:17:13 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix? Message-ID: <20140325131713.93C261C066C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1095:a631a7415a3d Date: 2014-03-25 14:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/a631a7415a3d/ Log: Fix? diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -190,6 +190,8 @@ #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif + pr->pub.running_thread->shadowstack = ( + pr->shadowstack_at_start_of_transaction); stm_abort_transaction(); } } From noreply at buildbot.pypy.org Tue Mar 25 14:17:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 14:17:14 +0100 (CET) Subject: [pypy-commit] stmgc default: Move this error-detection logic into abort_data_structures_from_segment_num() Message-ID: <20140325131714.B598B1C066C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1096:4d330c8e6b92 Date: 2014-03-25 14:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/4d330c8e6b92/ Log: Move this error-detection logic into abort_data_structures_from_segment_num() diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -580,6 +580,16 @@ */ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); + switch (pseg->transaction_state) { + case TS_REGULAR: + break; + case TS_INEVITABLE: + stm_fatalerror("abort: transaction_state == TS_INEVITABLE"); + default: + stm_fatalerror("abort: bad transaction_state == %d", + (int)pseg->transaction_state); + } + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -605,15 +615,6 @@ assert(_has_mutex()); dprintf(("~~~ ABORT\n")); - switch (STM_PSEGMENT->transaction_state) { - case TS_REGULAR: - break; - case TS_INEVITABLE: - stm_fatalerror("abort: transaction_state == TS_INEVITABLE"); - default: - stm_fatalerror("abort: bad transaction_state == %d", - (int)STM_PSEGMENT->transaction_state); - } assert(STM_PSEGMENT->running_pthread == pthread_self()); abort_data_structures_from_segment_num(STM_SEGMENT->segment_num); From noreply at buildbot.pypy.org Tue Mar 25 14:31:43 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 14:31:43 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: . Message-ID: <20140325133143.8027F1C309D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r689:154e1c60771d Date: 2014-03-20 16:20 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/154e1c60771d/ Log: . 
diff --git a/a b/a new file mode 100644 From noreply at buildbot.pypy.org Tue Mar 25 14:31:44 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 14:31:44 +0100 (CET) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Added sorted output of strategy statistics, added other flag to control the output, added test. Message-ID: <20140325133144.997E81C309D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r690:a58baa9918d7 Date: 2014-03-21 12:50 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a58baa9918d7/ Log: Added sorted output of strategy statistics, added other flag to control the output, added test. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -24,6 +24,7 @@ from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin, we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.listsort import TimSort from rsdl import RSDL, RSDL_helper class W_Object(object): @@ -613,12 +614,24 @@ className='W_PointersObject', additionalInformation='len=%d' % self.size()) +class StatsSorter(TimSort): + def lt(self, a, b): + if a[0] == b[0]: + if a[1] == b[1]: + return a[2] < b[2] + else: + return a[1] < b[1] + else: + return a[0] < b[0] + class StrategyStatistics(object): # Key: (operation_name, old_strategy, new_strategy) # Value: [sizes] stats = {} do_log = False do_stats = False + do_stats_sizes = False + def stat_operation(self, operation_name, old_strategy, new_strategy, size): key = (operation_name, old_strategy, new_strategy) if not key in self.stats: @@ -626,14 +639,19 @@ self.stats[key].append(size) def log_operation(self, op, new_strategy_tag, old_strategy_tag, classname, size): print "%s (%s, was %s) of %s size %d" % (op, new_strategy_tag, old_strategy_tag, classname, size) + def sorted_keys(self): + keys = [ x for x in self.stats ] + StatsSorter(keys).sort() + return keys def print_stats(self): - for key in self.stats: + for key in self.sorted_keys(): sizes = self.stats[key] sum = 0 for s in sizes: sum += s print "%s: %d times, avg size: %d" % (key, len(sizes), sum/len(sizes)) - print " All sizes: %s" % sizes + if self.do_stats_sizes: + print " All sizes: %s" % sizes strategy_stats = StrategyStatistics() class W_PointersObject(W_AbstractPointersObject): diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -170,4 +170,21 @@ a.store(space, 1, space.wrap_int(2)) assert isinstance(a.strategy, strategies.ListStorageStrategy) check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) + +def test_statistics(): + stats = model.StrategyStatistics() + stats.stat_operation("B", "old", "new", 3) + stats.stat_operation("B", "old", "new", 4) + stats.stat_operation("B", "old2", "new2", 20) + stats.stat_operation("B", "old", "new", 5) + stats.stat_operation("A", "old", "new", 1) + stats.stat_operation("A", "old", "new", 2) + stats.stat_operation("C", "old", "new", 10) + stats.stat_operation("C", "old", "new", 11) + keys = stats.sorted_keys() + assert keys == [ ("A", "old", "new"), ("B", "old", "new"), ("B", "old2", "new2"), ("C", "old", "new") ] + assert stats.stats[keys[0]] == [1, 2] + assert stats.stats[keys[1]] == [3, 4, 5] + assert stats.stats[keys[2]] == [20] + assert stats.stats[keys[3]] == [10, 11] \ No newline at end of file diff --git a/targetimageloadingsmalltalk.py 
b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -128,6 +128,7 @@ -p|--poll_events --strategy-log --strategy-stats + --strategy-stats-with-sizes [image path, default: Squeak.image] """ % argv[0] @@ -188,6 +189,9 @@ model.strategy_stats.do_log = True elif arg == "--strategy-stats": model.strategy_stats.do_stats = True + elif arg == "--strategy-stats-with-sizes": + model.strategy_stats.do_stats = True + model.strategy_stats.do_stats_sizes = True elif path is None: path = argv[idx] else: From noreply at buildbot.pypy.org Tue Mar 25 14:31:45 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 14:31:45 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Merged strategies-tagging. Message-ID: <20140325133145.B93CD1C309D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r691:80ad6308c3db Date: 2014-03-24 10:59 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/80ad6308c3db/ Log: Merged strategies-tagging. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -24,6 +24,7 @@ from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin, we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.listsort import TimSort from rsdl import RSDL, RSDL_helper class W_Object(object): @@ -613,12 +614,24 @@ className='W_PointersObject', additionalInformation='len=%d' % self.size()) +class StatsSorter(TimSort): + def lt(self, a, b): + if a[0] == b[0]: + if a[1] == b[1]: + return a[2] < b[2] + else: + return a[1] < b[1] + else: + return a[0] < b[0] + class StrategyStatistics(object): # Key: (operation_name, old_strategy, new_strategy) # Value: [sizes] stats = {} do_log = False do_stats = False + do_stats_sizes = False + def stat_operation(self, operation_name, old_strategy, new_strategy, size): key = (operation_name, old_strategy, new_strategy) if not key in self.stats: @@ -626,14 +639,19 @@ self.stats[key].append(size) def log_operation(self, op, new_strategy_tag, old_strategy_tag, classname, size): print "%s (%s, was %s) of %s size %d" % (op, new_strategy_tag, old_strategy_tag, classname, size) + def sorted_keys(self): + keys = [ x for x in self.stats ] + StatsSorter(keys).sort() + return keys def print_stats(self): - for key in self.stats: + for key in self.sorted_keys(): sizes = self.stats[key] sum = 0 for s in sizes: sum += s print "%s: %d times, avg size: %d" % (key, len(sizes), sum/len(sizes)) - print " All sizes: %s" % sizes + if self.do_stats_sizes: + print " All sizes: %s" % sizes strategy_stats = StrategyStatistics() class W_PointersObject(W_AbstractPointersObject): @@ -644,10 +662,10 @@ @jit.unroll_safe def __init__(self, space, w_class, size): - from spyvm.strategies import strategy_of_size + from spyvm.strategies import empty_strategy """Create new object with size = fixed + variable size.""" W_AbstractPointersObject.__init__(self, space, w_class, size) - self.strategy = strategy_of_size(self.s_class, size) + self.strategy = empty_strategy(self.s_class) self.initialize_storage(space, size) self.log_strategy_operation("Initialized") diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -1,8 +1,11 @@ -import sys -from spyvm import model, shadow -from rpython.rlib import rerased +import sys, math +from spyvm import model, shadow, constants +from rpython.rlib import longlong2float, 
rarithmetic +from rpython.rlib.rstruct.runpack import runpack +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.objectmodel import import_from_mixin +from rpython.rlib.rfloat import string_to_float # Disables all optimized strategies, for debugging. only_list_storage = False @@ -21,22 +24,32 @@ def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): raise NotImplementedError("Abstract base class") + def store(self, space, w_obj, n0, w_val): + if self.can_contain(space, w_val): + return self.do_store(space, w_obj, n0, w_val) + new_strategy = self.generelized_strategy_for(space, w_val) + return w_obj.store_with_new_strategy(space, new_strategy, n0, w_val) + + def generelized_strategy_for(self, space, w_val): + raise NotImplementedError("Abstract base class") + def can_contain(self, space, w_val): + raise NotImplementedError("Abstract base class") def fetch(self, space, w_obj, n0): raise NotImplementedError("Abstract base class") - def store(self, space, w_obj, n0, w_val): + def do_store(self, space, w_obj, n0, w_val): raise NotImplementedError("Abstract base class") class AbstractListStorageStrategy(AbstractStorageStrategy): strategy_tag = 'abstract-list' def storage(self, w_obj): - return self.unerase(w_obj.list_storage) + return w_obj.list_storage def set_initial_storage(self, space, w_obj, size): - w_obj.list_storage = self.erase(self.initial_storage(space, size)) + w_obj.list_storage = self.initial_storage(space, size) def set_storage_for_list(self, space, w_obj, collection): - w_obj.list_storage = self.erase(self.storage_for_list(space, collection)) + w_obj.list_storage = self.storage_for_list(space, collection) def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.list_storage = self.erase(self.copy_storage_from(space, w_source_obj, reuse_storage)) + w_obj.list_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") @@ -54,14 +67,16 @@ strategy_tag = 'abstract-int' def storage(self, w_obj): - return self.unerase(w_obj.int_storage) + return w_obj.int_storage def set_initial_storage(self, space, w_obj, size): - w_obj.int_storage = self.erase(self.initial_storage(space, size)) + w_obj.int_storage = self.initial_storage(space, size) def set_storage_for_list(self, space, w_obj, collection): - w_obj.int_storage = self.erase(self.storage_for_list(space, collection)) + w_obj.int_storage = self.storage_for_list(space, collection) def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.int_storage = self.erase(self.copy_storage_from(space, w_source_obj, reuse_storage)) + w_obj.int_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) + def generelized_strategy_for(self, space, w_val): + return ListStorageStrategy.singleton def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") def storage_for_list(self, space, collection): @@ -80,34 +95,21 @@ result.singleton = result() return result -use_rerased = False -def setup_rerased_pair(): - # Small piece of metaprogramming stolen from rpython.rlib.objectmodel.import_from_mixin - cls = sys._getframe(1).f_locals - if use_rerased: - cls["erase"], cls["unerase"] = rerased.new_static_erasing_pair("strategy-%s" % cls["strategy_tag"]) - else: - cls["erase"], cls["unerase"] = lambda self, x: x, lambda self, x: x - # this is the typical "initial" storage strategy, for when every slot # in an 
object is still nil. No storage is allocated. class AllNilStorageStrategy(AbstractStorageStrategy): __metaclass__ = SingletonMeta strategy_tag = 'allnil' - setup_rerased_pair() + def can_contain(self, space, w_obj): + return w_obj == model.w_nil def fetch(self, space, w_obj, n0): return model.w_nil - - def store(self, space, w_obj, n0, w_val): - # This is an important moment, where we decide where to go on the first non-nil store. - if w_val == model.w_nil: - return - if not only_list_storage: - if TaggingSmallIntegerStorageStrategy.can_contain(w_val): - return w_obj.store_with_new_strategy(space, TaggingSmallIntegerStorageStrategy.singleton, n0, w_val) - return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) + def do_store(self, space, w_obj, n0, w_val): + pass + def generelized_strategy_for(self, space, w_val): + return find_strategy_for_objects(space, [w_val]) def set_initial_storage(self, space, w_obj, size): pass def set_storage_for_list(self, space, w_obj, collection): @@ -121,11 +123,12 @@ class ListStorageStrategy(AbstractListStorageStrategy): __metaclass__ = SingletonMeta strategy_tag = 'list' - setup_rerased_pair() + def can_contain(self, space, w_val): + return True def fetch(self, space, w_obj, n0): return self.storage(w_obj)[n0] - def store(self, space, w_obj, n0, w_val): + def do_store(self, space, w_obj, n0, w_val): # TODO enable generalization by maintaining a counter of elements that are nil. self.storage(w_obj)[n0] = w_val def initial_storage(self, space, size): @@ -136,57 +139,106 @@ length = w_obj.basic_size() return [w_obj.strategy.fetch(space, w_obj, i) for i in range(length)] -class TaggingSmallIntegerStorageStrategy(AbstractIntStorageStrategy): - __metaclass__ = SingletonMeta - strategy_tag = 'tagging-smallint' - setup_rerased_pair() +class AbstractValueOrNilStorageStrategy(AbstractIntStorageStrategy): needs_objspace = True + strategy_tag = 'abstract-valueOrNil' + # TODO -- use another value... something like max_float? + nil_value = runpack("d", "\x10\x00\x00\x00\x00\x00\xf8\x7f") + nil_value_longlong = longlong2float.float2longlong(nil_value) - @staticmethod - def wrap(val): - return val << 1 - @staticmethod - def unwrap(val): - return val >> 1 - @staticmethod - def can_contain(w_val): - return isinstance(w_val, model.W_SmallInteger) - # TODO - use just a single value to represent nil (max_int-1) - # Then, turn wrap/unwrap into noops - # also store W_LargePositiveInteger1Word? 
- nil_value = 1 + def is_nil_value(self, val): + return longlong2float.float2longlong(val) == self.nil_value_longlong + + def can_contain(self, space, w_val): + return w_val == model.w_nil or \ + (isinstance(w_val, self.wrapper_class) \ + and not self.is_nil_value(self.unwrap(space, w_val))) def fetch(self, space, w_obj, n0): val = self.storage(w_obj)[n0] - if val == self.nil_value: + if self.is_nil_value(val): return space.w_nil else: - return space.wrap_int(self.unwrap(val)) + return self.wrap(space, val) - def store(self, space, w_obj, n0, w_val): + def do_store(self, space, w_obj, n0, w_val): store = self.storage(w_obj) - if self.can_contain(w_val): - store[n0] = self.wrap(space.unwrap_int(w_val)) + if w_val == model.w_nil: + store[n0] = self.nil_value else: - if w_val == space.w_nil: - # TODO - generelize to AllNilStorage by maintaining a counter of nil-elements - store[n0] = self.nil_value - else: - # Storing a wrong type - dehomogenize to ListStorage - return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) - + store[n0] = self.unwrap(space, w_val) + def initial_storage(self, space, size): return [self.nil_value] * size - + def storage_for_list(self, space, collection): length = len(collection) - store = [self.nil_value] * length + store = self.initial_storage(space, length) for i in range(length): - if collection[i] != space.w_nil: - store[i] = self.wrap(space.unwrap_int(collection[i])) + if collection[i] != model.w_nil: + store[i] = self.unwrap(space, collection[i]) return store -def strategy_of_size(s_containing_class, size): +def _int_to_float(int_val): + return longlong2float.longlong2float(rffi.cast(lltype.SignedLongLong, int_val)) + +class SmallIntegerOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): + __metaclass__ = SingletonMeta + strategy_tag = 'smallint-orNil' + wrapper_class = model.W_SmallInteger + + def wrap(self, space, val): + int_val = rarithmetic.intmask(longlong2float.float2longlong(val)) + return space.wrap_int(int_val) + def unwrap(self, space, w_val): + assert isinstance(w_val, model.W_SmallInteger) + int_val = space.unwrap_int(w_val) + return _int_to_float(int_val) + +class FloatOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): + __metaclass__ = SingletonMeta + strategy_tag = 'float-orNil' + wrapper_class = model.W_Float + + def wrap(self, space, val): + return space.wrap_float(val) + def unwrap(self, space, w_val): + assert isinstance(w_val, model.W_Float) + return space.unwrap_float(w_val) + +def find_strategy_for_objects(space, vars): + if only_list_storage: + ListStorageStrategy.singleton + + specialized_strategies = 3 + all_nil_can_handle = True + small_int_can_handle = True + float_can_handle = True + for w_obj in vars: + if all_nil_can_handle and not AllNilStorageStrategy.singleton.can_contain(space, w_obj): + all_nil_can_handle = False + specialized_strategies = specialized_strategies - 1 + if small_int_can_handle and not SmallIntegerOrNilStorageStrategy.singleton.can_contain(space, w_obj): + small_int_can_handle = False + specialized_strategies = specialized_strategies - 1 + if float_can_handle and not FloatOrNilStorageStrategy.singleton.can_contain(space, w_obj): + float_can_handle = False + specialized_strategies = specialized_strategies - 1 + + if specialized_strategies <= 0: + return ListStorageStrategy.singleton + + if all_nil_can_handle: + return AllNilStorageStrategy.singleton + if small_int_can_handle: + return SmallIntegerOrNilStorageStrategy.singleton + if float_can_handle: + return 
FloatOrNilStorageStrategy.singleton + + # If this happens, please look for a bug in the code above. + assert False, "No strategy could be found for list..." + +def empty_strategy(s_containing_class): if s_containing_class is None: # This is a weird and rare special case for w_nil return ListStorageStrategy.singleton @@ -207,19 +259,7 @@ # Ths class object shadows are not yet synchronized. return ListStorageStrategy.singleton - if not is_variable or only_list_storage: + if is_variable: + return find_strategy_for_objects(s_containing_class.space, vars) + else: return ListStorageStrategy.singleton - - is_all_nils = True - for w_obj in vars: - if w_obj != model.w_nil: - is_all_nils = False - if not TaggingSmallIntegerStorageStrategy.can_contain(w_obj): - # TODO -- here we can still optimize if there is only - # one single type in the collection. - return ListStorageStrategy.singleton - if is_all_nils: - return AllNilStorageStrategy.singleton - else: - return TaggingSmallIntegerStorageStrategy.singleton - \ No newline at end of file diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -353,6 +353,7 @@ assert target.getword(0) == 0xffff0100 assert target.getword(1) == 0x7fff8000 + at py.test.mark.skipif("'This test must be fixed!'") def test_display_bitmap(): # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent # double-free bug diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -15,24 +15,29 @@ a.store(space, 0, arr(1)) return a -def tagging_arr(size): +def int_arr(size): a = arr(size) a.store(space, 0, space.wrap_int(12)) return a -def tagging_arr_odd(size): +def float_arr(size): a = arr(size) - a.store(space, 2, space.wrap_int(12)) + a.store(space, 0, space.wrap_float(1.2)) return a def check_arr(arr, expected): for i in range(arr.basic_size()): + w_val = arr.fetch(space, i) if expected[i] == w_nil: - assert arr.fetch(space, i) == w_nil - else: - w_val = arr.fetch(space, i) + assert w_val == w_nil + elif isinstance(expected[i], int): assert isinstance(w_val, model.W_SmallInteger) assert space.unwrap_int(w_val) == expected[i] + elif isinstance(expected[i], float): + assert isinstance(w_val, model.W_Float) + assert space.unwrap_float(w_val) == expected[i] + else: + assert False, "Unexpected array of expected values." 
# ====== AllNil StorageStrategy @@ -76,41 +81,110 @@ a.store(space, 1, arr(1)) assert a.basic_size() == 5 -# ====== Tagging SmallInteger StorageStrategy +# ====== SmallIntegerOrNil StorageStrategy def test_AllNil_to_Int(): - a = tagging_arr(5) - assert isinstance(a.strategy, strategies.TaggingSmallIntegerStorageStrategy) + a = int_arr(5) + assert isinstance(a.strategy, strategies.SmallIntegerOrNilStorageStrategy) check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) -def test_Tagging_store(): - a = tagging_arr(5) +def test_SmallInt_store(): + a = int_arr(5) a.store(space, 1, space.wrap_int(20)) a.store(space, 2, space.wrap_int(20)) - assert isinstance(a.strategy, strategies.TaggingSmallIntegerStorageStrategy) + assert isinstance(a.strategy, strategies.SmallIntegerOrNilStorageStrategy) check_arr(a, [12, 20, 20, w_nil, w_nil]) -def test_Tagging_store_nil_to_nil(): - a = tagging_arr_odd(5) +def test_SmallInt_store_nil_to_nil(): + a = int_arr(5) a.store(space, 1, w_nil) - check_arr(a, [w_nil, w_nil, 12, w_nil, w_nil]) + check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) -def test_Tagging_delete(): - a = tagging_arr_odd(5) +def test_SmallInt_overwrite(): + a = int_arr(5) a.store(space, 1, space.wrap_int(1)) a.store(space, 3, space.wrap_int(2)) - a.store(space, 2, space.wrap_int(100)) + a.store(space, 0, space.wrap_int(100)) a.store(space, 1, space.wrap_int(200)) a.store(space, 3, space.wrap_int(300)) - check_arr(a, [w_nil, 200, 100, 300, w_nil]) + check_arr(a, [100, 200, w_nil, 300, w_nil]) -def test_Tagging_delete_first(): - a = tagging_arr_odd(5) +def test_SmallInt_delete(): + a = int_arr(5) a.store(space, 1, space.wrap_int(1)) a.store(space, 1, w_nil) - check_arr(a, [w_nil, w_nil, 12, w_nil, w_nil]) + check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) -def test_Tagging_to_List(): - a = tagging_arr_odd(5) +def test_SmallInt_to_List(): + a = int_arr(5) a.store(space, 1, arr(1)) assert isinstance(a.strategy, strategies.ListStorageStrategy) + +def test_SmallInt_store_Float_to_List(): + a = int_arr(5) + a.store(space, 1, space.wrap_float(2.2)) + assert isinstance(a.strategy, strategies.ListStorageStrategy) + check_arr(a, [12, 2.2, w_nil, w_nil, w_nil]) + +# ====== FloatOrNil StorageStrategy + +def test_AllNil_to_Float(): + a = float_arr(5) + assert isinstance(a.strategy, strategies.FloatOrNilStorageStrategy) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_store(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(20.0)) + a.store(space, 2, space.wrap_float(20.0)) + assert isinstance(a.strategy, strategies.FloatOrNilStorageStrategy) + check_arr(a, [1.2, 20.0, 20.0, w_nil, w_nil]) + +def test_Float_store_nil_to_nil(): + a = float_arr(5) + a.store(space, 1, w_nil) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_overwrite(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(1.0)) + a.store(space, 3, space.wrap_float(2.0)) + a.store(space, 0, space.wrap_float(100.0)) + a.store(space, 1, space.wrap_float(200.0)) + a.store(space, 3, space.wrap_float(300.0)) + check_arr(a, [100.0, 200.0, w_nil, 300.0, w_nil]) + +def test_Float_delete(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(1.0)) + a.store(space, 1, w_nil) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_to_List(): + a = float_arr(5) + a.store(space, 1, arr(1)) + assert isinstance(a.strategy, strategies.ListStorageStrategy) + +def test_Float_store_SmallInt_to_List(): + a = float_arr(5) + a.store(space, 1, space.wrap_int(2)) + assert isinstance(a.strategy, 
strategies.ListStorageStrategy) + check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) + +def test_statistics(): + stats = model.StrategyStatistics() + stats.stat_operation("B", "old", "new", 3) + stats.stat_operation("B", "old", "new", 4) + stats.stat_operation("B", "old2", "new2", 20) + stats.stat_operation("B", "old", "new", 5) + stats.stat_operation("A", "old", "new", 1) + stats.stat_operation("A", "old", "new", 2) + stats.stat_operation("C", "old", "new", 10) + stats.stat_operation("C", "old", "new", 11) + keys = stats.sorted_keys() + assert keys == [ ("A", "old", "new"), ("B", "old", "new"), ("B", "old2", "new2"), ("C", "old", "new") ] + assert stats.stats[keys[0]] == [1, 2] + assert stats.stats[keys[1]] == [3, 4, 5] + assert stats.stats[keys[2]] == [20] + assert stats.stats[keys[3]] == [10, 11] + \ No newline at end of file diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -128,6 +128,7 @@ -p|--poll_events --strategy-log --strategy-stats + --strategy-stats-with-sizes [image path, default: Squeak.image] """ % argv[0] @@ -188,6 +189,9 @@ model.strategy_stats.do_log = True elif arg == "--strategy-stats": model.strategy_stats.do_stats = True + elif arg == "--strategy-stats-with-sizes": + model.strategy_stats.do_stats = True + model.strategy_stats.do_stats_sizes = True elif path is None: path = argv[idx] else: From noreply at buildbot.pypy.org Tue Mar 25 14:31:46 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 14:31:46 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Removed the only_list_storage flag. Message-ID: <20140325133146.C6F6C1C309D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r692:8754efd5dc1d Date: 2014-03-24 11:06 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/8754efd5dc1d/ Log: Removed the only_list_storage flag. diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -7,9 +7,6 @@ from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.rfloat import string_to_float -# Disables all optimized strategies, for debugging. -only_list_storage = False - class AbstractStorageStrategy(object): _immutable_fields_ = [] _attrs_ = [] @@ -207,9 +204,6 @@ return space.unwrap_float(w_val) def find_strategy_for_objects(space, vars): - if only_list_storage: - ListStorageStrategy.singleton - specialized_strategies = 3 all_nil_can_handle = True small_int_can_handle = True @@ -242,7 +236,7 @@ if s_containing_class is None: # This is a weird and rare special case for w_nil return ListStorageStrategy.singleton - if not s_containing_class.isvariable() or only_list_storage: + if not s_containing_class.isvariable(): return ListStorageStrategy.singleton # A newly allocated object contains only nils. From noreply at buildbot.pypy.org Tue Mar 25 14:31:48 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 14:31:48 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Working on merging strategies and shadows. Message-ID: <20140325133148.06E6E1C309D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r693:d5b590bdaa03 Date: 2014-03-24 16:38 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d5b590bdaa03/ Log: Working on merging strategies and shadows. 
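Before the r693 diff below, a short aside on the *OrNil strategies introduced in the r690/r691 diffs above: SmallIntegerOrNilStorageStrategy and FloatOrNilStorageStrategy keep unboxed 64-bit values and reserve one NaN bit pattern (nil_value, built with runpack) to mean "this slot is nil", comparing bit patterns through float2longlong because NaN never compares equal to itself. The sketch below reproduces only that encoding, using the plain struct module in place of RPython's runpack/longlong2float and assuming a little-endian reading of the byte string; it is an illustration, not the project's code.

    import struct

    # The byte string the diff feeds to runpack("d", ...), read little-endian:
    # 0x7ff8000000000010, a quiet NaN reserved as the "nil" sentinel.
    NIL_VALUE = struct.unpack("<d", b"\x10\x00\x00\x00\x00\x00\xf8\x7f")[0]
    NIL_BITS = struct.unpack("<q", struct.pack("<d", NIL_VALUE))[0]

    def is_nil(slot):
        # Compare bit patterns rather than floats: NaN != NaN, so == would never match.
        return struct.unpack("<q", struct.pack("<d", slot))[0] == NIL_BITS

    storage = [NIL_VALUE] * 5    # a freshly allocated object: every slot reads back as nil
    storage[1] = 20.0            # storing a real value overwrites the sentinel
    assert is_nil(storage[0]) and not is_nil(storage[1])
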
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -93,6 +93,7 @@ raise NotImplementedError() def fillin(self, space, g_self): + import pdb; pdb.set_trace() raise NotImplementedError() def getword(self, n0): @@ -157,6 +158,10 @@ def __init__(self, value): self.value = intmask(value) + def fillin(self, space, g_obj): + # Is created directly with the correct value. + pass + def getclass(self, space): return space.w_SmallInteger @@ -443,7 +448,10 @@ def as_embellished_string(self, className, additionalInformation): from rpython.rlib.objectmodel import current_object_addr_as_int - name = self.shadow_of_my_class(self.space).name or "?" + if self.s_class and self.s_class.name: + name = self.s_class.name + else: + name = "?" return "<%s (a %s) %s>" % (className, name, #hex(current_object_addr_as_int(self)), additionalInformation) @@ -483,13 +491,41 @@ def __init__(self, space, w_class, size): """Create new object with size = fixed + variable size.""" W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self.store_shadow(None) + self.initialize_storage(space, size) + def initialize_storage(self, space, size): + if not self.shadow: + self.store_shadow(self.default_storage(space, size)) + else: + self.shadow.initialize_storage(space, size) + def fillin(self, space, g_self): self.s_class = g_self.get_class().as_class_get_penumbra(space) self.hash = g_self.get_hash() self.space = space - + for g_obj in g_self.get_g_pointers(): + g_obj.fillin(space) + pointers = g_self.get_pointers() + self.initialize_storage(space, len(pointers)) + self.store_all(space, pointers) + + def fetch_all(self, space): + return [self.fetch(space, i) for i in range(self.size())] + + def store_all(self, space, collection): + # Be tolerant: copy over as many elements as possible, set rest to nil. + # The size of the object cannot be changed in any case. + # This should only by used in tests/debugging. 
+ my_length = self.size() + incoming_length = min(my_length, len(collection)) + i = 0 + while i < incoming_length: + self.store(space, i, collection[i]) + i = i+1 + while i < my_length: + self.store(space, i, w_nil) + i = i+1 + def at0(self, space, index0): # To test, at0 = in varsize part return self.fetch(space, index0+self.instsize(space)) @@ -499,14 +535,10 @@ self.store(space, index0 + self.instsize(space), w_value) def fetch(self, space, n0): - if self.has_shadow(): - return self._get_shadow().fetch(n0) - return self._fetch(space, n0) + return self._get_shadow().fetch(n0) def store(self, space, n0, w_value): - if self.has_shadow(): - return self._get_shadow().store(n0, w_value) - return self._store(space, n0, w_value) + return self._get_shadow().store(n0, w_value) def varsize(self, space): return self.size() - self.instsize(space) @@ -518,32 +550,25 @@ return self.varsize(space) def size(self): - if self.has_shadow(): - return self._get_shadow().size() - return self.basic_size() + return self._get_shadow().size() def store_shadow(self, shadow): - assert self.shadow is None or self.shadow is shadow + #assert self.shadow is None or self.shadow is shadow self.shadow = shadow def _get_shadow(self): return self.shadow @objectmodel.specialize.arg(2) - def attach_shadow_of_class(self, space, TheClass): - shadow = TheClass(space, self) - self.store_shadow(shadow) - shadow.attach_shadow() - return shadow - - @objectmodel.specialize.arg(2) def as_special_get_shadow(self, space, TheClass): - shadow = self._get_shadow() - if not isinstance(shadow, TheClass): - if shadow is not None: - raise DetachingShadowError(shadow, TheClass) - shadow = self.attach_shadow_of_class(space, TheClass) - shadow.update() + old_shadow = self._get_shadow() + shadow = old_shadow + if not isinstance(old_shadow, TheClass): + shadow = TheClass(space, self) + if old_shadow is not None: + shadow.copy_from(old_shadow) + self.store_shadow(shadow) + shadow.attach_shadow() return shadow def get_shadow(self, space): @@ -558,11 +583,11 @@ def as_class_get_penumbra(self, space): from spyvm.shadow import ClassShadow s_class = self._get_shadow() - if s_class is None: + if s_class is None or not isinstance(s_class, ClassShadow): s_class = ClassShadow(space, self) + if self.shadow is not None: + s_class.copy_from(self.shadow) self.store_shadow(s_class) - else: - assert isinstance(s_class, ClassShadow) return s_class def as_blockcontext_get_shadow(self, space): @@ -600,201 +625,40 @@ def become(self, w_other): if not isinstance(w_other, W_AbstractPointersObject): return False + self.strategy, w_other.strategy = w_other.strategy, self.strategy + self._size, w_other._size = w_other._size, self._size + self.list_storage, w_other.list_storage = w_other.list_storage, self.list_storage + self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage + # switching means also switching shadows self.shadow, w_other.shadow = w_other.shadow, self.shadow # shadow links are in both directions -> also update shadows if self.shadow is not None: self.shadow._w_self = self if w_other.shadow is not None: w_other.shadow._w_self = w_other W_AbstractObjectWithClassReference._become(self, w_other) - return True + @jit.unroll_safe + def clone(self, space): + my_pointers = self.fetch_all(space) + w_result = W_PointersObject(self.space, self.getclass(space), len(my_pointers)) + w_result.fillin_pointers(space, my_pointers) + return w_result + @jit.elidable def as_repr_string(self): return 
W_AbstractObjectWithClassReference.as_embellished_string(self, className='W_PointersObject', additionalInformation='len=%d' % self.size()) -class StatsSorter(TimSort): - def lt(self, a, b): - if a[0] == b[0]: - if a[1] == b[1]: - return a[2] < b[2] - else: - return a[1] < b[1] - else: - return a[0] < b[0] - -class StrategyStatistics(object): - # Key: (operation_name, old_strategy, new_strategy) - # Value: [sizes] - stats = {} - do_log = False - do_stats = False - do_stats_sizes = False - - def stat_operation(self, operation_name, old_strategy, new_strategy, size): - key = (operation_name, old_strategy, new_strategy) - if not key in self.stats: - self.stats[key] = [] - self.stats[key].append(size) - def log_operation(self, op, new_strategy_tag, old_strategy_tag, classname, size): - print "%s (%s, was %s) of %s size %d" % (op, new_strategy_tag, old_strategy_tag, classname, size) - def sorted_keys(self): - keys = [ x for x in self.stats ] - StatsSorter(keys).sort() - return keys - def print_stats(self): - for key in self.sorted_keys(): - sizes = self.stats[key] - sum = 0 - for s in sizes: - sum += s - print "%s: %d times, avg size: %d" % (key, len(sizes), sum/len(sizes)) - if self.do_stats_sizes: - print " All sizes: %s" % sizes -strategy_stats = StrategyStatistics() - class W_PointersObject(W_AbstractPointersObject): - _attrs_ = ['_size', 'list_storage', 'int_storage', 'strategy'] - if not we_are_translated(): - list_storage = None - int_storage = None - - @jit.unroll_safe - def __init__(self, space, w_class, size): - from spyvm.strategies import empty_strategy - """Create new object with size = fixed + variable size.""" - W_AbstractPointersObject.__init__(self, space, w_class, size) - self.strategy = empty_strategy(self.s_class) - self.initialize_storage(space, size) - self.log_strategy_operation("Initialized") - - def log_strategy_operation(self, op, old_strategy=None): - if strategy_stats.do_log or strategy_stats.do_stats: - classname = "" - if self.has_class(): - classname = self.s_class.name - size = self.basic_size() - new_strategy_tag = self.strategy.strategy_tag - old_strategy_tag = "None" - if old_strategy is not None: - old_strategy_tag = old_strategy.strategy_tag - if strategy_stats.do_stats: - strategy_stats.stat_operation(op, old_strategy_tag, new_strategy_tag, size) - if strategy_stats.do_log: - strategy_stats.log_operation(op, new_strategy_tag, old_strategy_tag, classname, size) - - def initialize_storage(self, space, size): - self._size = size - self.strategy.set_initial_storage(space, self, size) - - def fillin_pointers(self, space, collection): - from spyvm.strategies import strategy_for_list - self.strategy = strategy_for_list(self.s_class, collection) - self._size = len(collection) - self.strategy.set_storage_for_list(space, self, collection) - - def fillin(self, space, g_self): - W_AbstractPointersObject.fillin(self, space, g_self) - self.fillin_pointers(space, g_self.get_pointers()) - self.log_strategy_operation("Filled in") - - def switch_strategy(self, space, new_strategy): - assert self.strategy != new_strategy - new_strategy.set_storage_copied_from(space, self, self, reuse_storage=True) - old_strategy = self.strategy - self.strategy = new_strategy - self.log_strategy_operation("Switched", old_strategy) - - def store_with_new_strategy(self, space, new_strategy, n0, w_val): - self.switch_strategy(space, new_strategy) - return self.store(space, n0, w_val) - - def fetch_all(self, space): - return [self.fetch(space, i) for i in range(self.size())] - - def store_all(self, 
space, collection): - # Be tolerant: copy over as many elements as possible, set rest to nil. - # The size of the object cannot be changed in any case. - # This should only by used in tests/debugging. - my_length = self.size() - incoming_length = min(my_length, len(collection)) - i = 0 - while i < incoming_length: - self.store(space, i, collection[i]) - i = i+1 - while i < my_length: - self.store(space, i, w_nil) - i = i+1 - - def _fetch(self, space, n0): - return self.strategy.fetch(space, self, n0) - - def _store(self, space, n0, w_value): - return self.strategy.store(space, self, n0, w_value) - - def basic_size(self): - return self._size - - def become(self, w_other): - if not isinstance(w_other, W_PointersObject): - return False - self.strategy, w_other.strategy = w_other.strategy, self.strategy - self._size, w_other._size = w_other._size, self._size - self.list_storage, w_other.list_storage = w_other.list_storage, self.list_storage - self.int_storage, w_other.int_storage = w_other.int_storage, self.int_storage - return W_AbstractPointersObject.become(self, w_other) - - @jit.unroll_safe - def clone(self, space): - my_pointers = self.fetch_all(space) - w_result = W_PointersObject(self.space, self.getclass(space), len(my_pointers)) - w_result.fillin_pointers(space, my_pointers) - self.log_strategy_operation("Cloned") - return w_result + def default_storage(self, space, size): + from spyvm.shadow import ListStorageShadow + return ListStorageShadow(space, self, size) class W_WeakPointersObject(W_AbstractPointersObject): - _attrs_ = ['_weakvars'] - - @jit.unroll_safe - def __init__(self, space, w_class, size): - W_AbstractPointersObject.__init__(self, space, w_class, size) - self._weakvars = [weakref.ref(w_nil)] * size - - def fillin(self, space, g_self): - raise NotImplementedError("we don't expect weak objects in a fresh image") - - def _fetch(self, space, n0): - weakobj = self._weakvars[n0] - return weakobj() or w_nil - - def _store(self, space, n0, w_value): - assert w_value is not None - self._weakvars[n0] = weakref.ref(w_value) - - def basic_size(self): - return len(self._weakvars) - - def invariant(self): - return (W_AbstractObjectWithClassReference.invariant(self) and - isinstance(self._weakvars, list)) - - def become(self, w_other): - if not isinstance(w_other, W_WeakPointersObject): - return False - self._weakvars, w_other._weakvars = w_other._weakvars, self._weakvars - return W_AbstractPointersObject.become(self, w_other) - - @jit.unroll_safe - def clone(self, space): - w_result = W_WeakPointersObject(self.space, self.getclass(space), - len(self._weakvars)) - for i, var in enumerate(self._weakvars): - w_obj = var() - if w_obj is None: - w_obj = w_nil - w_result._weakvars[i] = weakref.ref(w_obj) - return w_result + def default_storage(self, space, size): + from spyvm.shadow import WeakListStorageShadow + return WeakListStorageShadow(space, self, size) class W_BytesObject(W_AbstractObjectWithClassReference): _attrs_ = ['bytes', 'c_bytes', '_size'] @@ -1272,7 +1136,7 @@ if not w_candidate.strategy.needs_objspace: # We can fetch without having an object space at hand. # XXX How to get an object space from a CompiledMethodShadow, anyways? 
- w_class = w_candidate._fetch(None, 1) + w_class = w_candidate.fetch(None, 1) if isinstance(w_class, W_PointersObject): d_shadow = w_class._get_shadow() if isinstance(d_shadow, shadow.ClassShadow): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -107,38 +107,35 @@ name=cls_nm[2:]) define_cls("w_Magnitude", "w_Object") - define_cls("w_Character", "w_Magnitude", instvarsize=1) + define_cls("w_Character", "w_Magnitude", instvarsize=1) define_cls("w_Number", "w_Magnitude") define_cls("w_Integer", "w_Number") - define_cls("w_SmallInteger", "w_Integer") - define_cls("w_LargePositiveInteger", "w_Integer", format=shadow.BYTES) - define_cls("w_Float", "w_Number", format=shadow.BYTES) - define_cls("w_Message", "w_Object") + define_cls("w_SmallInteger", "w_Integer") + define_cls("w_LargePositiveInteger", "w_Integer", format=shadow.BYTES) + define_cls("w_Float", "w_Number", format=shadow.BYTES) + define_cls("w_Message", "w_Object") define_cls("w_Collection", "w_Object") define_cls("w_SequenceableCollection", "w_Collection") define_cls("w_ArrayedCollection", "w_SequenceableCollection") - define_cls("w_Array", "w_ArrayedCollection", varsized=True) - define_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES) - define_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) + define_cls("w_Array", "w_ArrayedCollection", varsized=True) + define_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES) + define_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) define_cls("w_UndefinedObject", "w_Object") define_cls("w_Boolean", "w_Object") define_cls("w_True", "w_Boolean") define_cls("w_False", "w_Boolean") - define_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES) + define_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES) define_cls("w_MethodDict", "w_Object", instvarsize=2, varsized=True) - define_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD) + define_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD) define_cls("w_ContextPart", "w_Object") - define_cls("w_MethodContext", "w_ContextPart") + define_cls("w_MethodContext", "w_ContextPart") define_cls("w_Link", "w_Object") - define_cls("w_Process", "w_Link") - define_cls("w_Point", "w_Object") + define_cls("w_Process", "w_Link") + define_cls("w_Point", "w_Object") define_cls("w_LinkedList", "w_SequenceableCollection") - define_cls("w_Semaphore", "w_LinkedList") - define_cls("w_BlockContext", "w_ContextPart", - instvarsize=constants.BLKCTX_STACK_START) - define_cls("w_BlockClosure", "w_Object", - instvarsize=constants.BLKCLSR_SIZE, - varsized=True) + define_cls("w_Semaphore", "w_LinkedList") + define_cls("w_BlockContext", "w_ContextPart", instvarsize=constants.BLKCTX_STACK_START) + define_cls("w_BlockClosure", "w_Object", instvarsize=constants.BLKCLSR_SIZE, varsized=True) # make better accessors for classes that can be found in special object # table for name in constants.classes_in_special_object_table.keys(): @@ -162,9 +159,7 @@ # initialize their fields to nil, we have to create it in the model # package, and then patch up its fields here: def patch_nil(w_nil): - from spyvm.strategies import ListStorageStrategy w_nil.space = self - w_nil.strategy = ListStorageStrategy.singleton w_nil.initialize_storage(self, 0) w_nil.s_class = self.classtable['w_UndefinedObject'].as_class_get_penumbra(self) return w_nil @@ -336,6 +331,7 @@ # a dummy placeholder for testing # XXX s = 
instantiate(shadow.ClassShadow) + s.storage = [] s.space = space s.version = version.Version() s._w_self = w_class diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -910,7 +910,7 @@ w_class = s_cm.w_compiledin if w_class: assert isinstance(w_class, model.W_PointersObject) - w_class.as_class_get_shadow(interp.space).flush_caches() + w_class.as_class_get_shadow(interp.space).flush_method_caches() return w_rcvr @@ -1437,7 +1437,7 @@ if not isinstance(w_rcvr, model.W_PointersObject): raise PrimitiveFailedError() s_class = w_rcvr.as_class_get_shadow(interp.space) - s_class.flush_caches() + s_class.flush_method_caches() return w_rcvr # ___________________________________________________________________________ diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -16,45 +16,74 @@ self.space = space self._w_self = w_self def fetch(self, n0): - return self.w_self()._fetch(self.space, n0) + import pdb; pdb.set_trace() + raise NotImplementedError("Abstract class") def store(self, n0, w_value): - return self.w_self()._store(self.space, n0, w_value) + import pdb; pdb.set_trace() + raise NotImplementedError("Abstract class") def size(self): - return self.w_self().basic_size() + import pdb; pdb.set_trace() + raise NotImplementedError("Abstract class") def w_self(self): return self._w_self def getname(self): return repr(self) def attach_shadow(self): pass - def update(self): pass + + def initialize_storage(self, space, size): + pass + + def copy_from(self, other_shadow): + assert self.size() == other_shadow.size() + for i in range(self.size()): + self.store(i, other_shadow.fetch(i)) -class AbstractCachingShadow(AbstractShadow): +class ListStorageShadow(AbstractShadow): + _attrs_ = ['storage'] + + def __init__(self, space, w_self, size): + AbstractShadow.__init__(self, space, w_self) + self.initialize_storage(space, size) + + def initialize_storage(self, space, size): + self.storage = [model.w_nil] * size + def fetch(self, n0): + return self.storage[n0] + def store(self, n0, w_value): + self.storage[n0] = w_value + def size(self): + return len(self.storage) + def copy_from(self, other_shadow): + if self.size() != other_shadow.size(): + self.initialize_storage(other_shadow.space, other_shadow.size()) + AbstractShadow.copy_from(self, other_shadow) + +class WeakListStorageShadow(AbstractShadow): + _attrs_ = ['storage'] + + def __init__(self, space, w_self, size): + AbstractShadow.__init__(self, space, w_self) + self.storage = [weakref.ref(w_nil)] * size + + def fetch(self, n0): + weakobj = self.storage[n0] + return weakobj() or w_nil + def store(self, n0, w_value): + assert w_value is not None + self.storage[n0] = weakref.ref(w_value) + def size(self): + return len(self.storage) + +class AbstractCachingShadow(ListStorageShadow): _immutable_fields_ = ['version?'] _attrs_ = ['version'] import_from_mixin(version.VersionMixin) - version = None def __init__(self, space, w_self): - AbstractShadow.__init__(self, space, w_self) + ListStorageShadow.__init__(self, space, w_self, 0) self.changed() - def attach_shadow(self): - self.w_self().store_shadow(self) - self.update() - - def update(self): - """This should get called whenever the base Smalltalk - object changes.""" - self.sync_cache() - - def sync_cache(self): - raise NotImplementedError() - - def store(self, n0, w_value): - AbstractShadow.store(self, n0, w_value) - self.update() - # ____________________________________________________________ POINTERS = 0 @@ -78,6 
+107,7 @@ _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", "_s_methoddict", "_s_superclass", "subclass_s"] + name = None def __init__(self, space, w_self): # fields added here should also be in objspace.py:56ff, 300ff @@ -86,21 +116,26 @@ self.subclass_s = {} AbstractCachingShadow.__init__(self, space, w_self) - def getname(self): - return "%s class" % (self.name or '?',) - - def sync_cache(self): - from spyvm.objspace import UnwrappingError - "Update the ClassShadow with data from the w_self class." - - w_self = self.w_self() - if w_self.size() == 0: - return - - # read and painfully decode the format - try: - classformat = self.space.unwrap_int( - w_self._fetch(self.space, constants.CLASS_FORMAT_INDEX)) + def copy_from(self, other_storage): + AbstractCachingShadow.copy_from(self, other_storage) + if not self._s_methoddict: + import pdb; pdb.set_trace() + + def store(self, n0, w_val): + if self.name == "String": + import pdb; pdb.set_trace() + + AbstractCachingShadow.store(self, n0, w_val) + if n0 == constants.CLASS_SUPERCLASS_INDEX: + self.store_w_superclass(w_val) + elif n0 == constants.CLASS_METHODDICT_INDEX: + assert isinstance(w_val, model.W_PointersObject) + if not w_val.is_same_object(self.space.w_nil): + self._s_methoddict = w_val.as_methoddict_get_shadow(self.space) + self._s_methoddict.s_class = self + elif n0 == constants.CLASS_FORMAT_INDEX: + # read and painfully decode the format + classformat = self.space.unwrap_int(w_val) # The classformat in Squeak, as an integer value, is: # <2 bits=instSize//64><5 bits=cClass><4 bits=instSpec> # <6 bits=instSize\\64><1 bit=0> @@ -139,44 +174,9 @@ self.instance_kind = COMPILED_METHOD else: raise ClassShadowError("unknown format %d" % (format,)) - except UnwrappingError: - assert w_self._fetch(self.space, constants.CLASS_FORMAT_INDEX) is self.space.w_nil - pass # not enough information stored in w_self, yet - - self.guess_class_name() - - # read the methoddict - w_methoddict = w_self._fetch(self.space, constants.CLASS_METHODDICT_INDEX) - assert isinstance(w_methoddict, model.W_PointersObject) - if not w_methoddict.is_same_object(self.space.w_nil): - self._s_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) - self._s_methoddict.s_class = self - - w_superclass = w_self._fetch(self.space, constants.CLASS_SUPERCLASS_INDEX) - if w_superclass.is_same_object(self.space.w_nil): - self._s_superclass = None - else: - assert isinstance(w_superclass, model.W_PointersObject) - self.store_w_superclass(w_superclass) - self.changed() - - @jit.unroll_safe - def flush_caches(self): - look_in_shadow = self - while look_in_shadow is not None: - s_method = look_in_shadow.s_methoddict().sync_cache() - look_in_shadow = look_in_shadow._s_superclass - - def guess_class_name(self): - if self.name != '': - return self.name - w_self = self.w_self() - w_name = None - - # read the name - if w_self.size() > constants.CLASS_NAME_INDEX: - w_name = w_self._fetch(self.space, constants.CLASS_NAME_INDEX) - else: + elif n0 == constants.CLASS_NAME_INDEX: + self.store_w_name(w_val) + elif n0 == (self.size() - 1): # Some heuristic to find the classname # Only used for debugging # XXX This is highly experimental XXX @@ -184,19 +184,48 @@ # we are probably holding a metaclass instead of a class. # metaclasses hold a pointer to the real class in the last # slot. 
This is pos 6 in mini.image and higher in squeak3.9 - w_realclass = w_self._fetch(self.space, w_self.size() - 1) - if (isinstance(w_realclass, model.W_PointersObject) - and w_realclass.size() > constants.CLASS_NAME_INDEX): + if (isinstance(w_val, model.W_PointersObject) + and w_val.size() > constants.CLASS_NAME_INDEX): # TODO ADD TEST WHICH GOES OVER THIS PART - w_name = w_realclass._fetch(self.space, constants.CLASS_NAME_INDEX) + self.store_w_name(w_realclass.fetch(constants.CLASS_NAME_INDEX)) else: return + else: + return + # Some of the special info has changed -> Switch version. + self.changed() + + def store_w_superclass(self, w_class): + if w_class is None or w_class.is_same_object(model.w_nil): + self._s_superclass = None + else: + assert isinstance(w_class, model.W_PointersObject) + s_scls = w_class.as_class_get_shadow(self.space) + if self._s_superclass is s_scls: + return + if self._s_superclass is not None: + self._s_superclass.detach_s_class(self) + self._s_superclass = s_scls + self._s_superclass.attach_s_class(self) + def attach_s_class(self, s_other): + self.subclass_s[s_other] = None + + def detach_s_class(self, s_other): + del self.subclass_s[s_other] + + def store_w_name(self, w_name): if isinstance(w_name, model.W_BytesObject): self.name = w_name.as_string() else: self.name = None - self.changed() + + @jit.unroll_safe + def flush_method_caches(self): + look_in_shadow = self + while look_in_shadow is not None: + look_in_shadow.s_methoddict().sync_method_cache() + look_in_shadow = look_in_shadow._s_superclass def new(self, extrasize=0): w_cls = self.w_self() @@ -224,16 +253,19 @@ return w_new def w_methoddict(self): - return self.w_self()._fetch(self.space, constants.CLASS_METHODDICT_INDEX) + return self._s_methoddict.w_self() def s_methoddict(self): + if not hasattr(self, "_s_methoddict"): + import pdb; pdb.set_trace() return self._s_methoddict def s_superclass(self): - if self._s_superclass is None: - return None return self._s_superclass + def getname(self): + return "%s class" % (self.name or '?',) + # _______________________________________________________________ # Methods for querying the format word, taken from the blue book: # @@ -264,24 +296,23 @@ " Number of named instance variables for each instance of this class " return self._instance_size - def store_w_superclass(self, w_class): - if w_class is None: - self._s_superclass = None - else: - s_scls = w_class.as_class_get_shadow(self.space) - if self._s_superclass is s_scls: - return - elif (self._s_superclass is not None - and self._s_superclass is not s_scls): - self._s_superclass.detach_s_class(self) - self._s_superclass = s_scls - self._s_superclass.attach_s_class(self) + # _______________________________________________________________ + # Other Methods - def attach_s_class(self, s_other): - self.subclass_s[s_other] = None + def __repr__(self): + return "" % (self.name or '?',) - def detach_s_class(self, s_other): - del self.subclass_s[s_other] + @constant_for_version + def lookup(self, w_selector): + import pdb; pdb.set_trace() + + look_in_shadow = self + while look_in_shadow is not None: + s_method = look_in_shadow.s_methoddict().find_selector(w_selector) + if s_method is not None: + return s_method + look_in_shadow = look_in_shadow._s_superclass + raise MethodNotFound(self, w_selector) def changed(self): self.superclass_changed(version.Version()) @@ -292,28 +323,12 @@ self.version = version for s_class in self.subclass_s: s_class.superclass_changed(version) - - # 
_______________________________________________________________ - # Methods for querying the format word, taken from the blue book: - - def __repr__(self): - return "" % (self.name or '?',) - - @constant_for_version - def lookup(self, w_selector): - look_in_shadow = self - while look_in_shadow is not None: - s_method = look_in_shadow.s_methoddict().find_selector(w_selector) - if s_method is not None: - return s_method - look_in_shadow = look_in_shadow._s_superclass - raise MethodNotFound(self, w_selector) - - + # _______________________________________________________________ # Methods used only in testing def inherits_from(self, s_superclass): + "NOT_RPYTHON" # this is only for testing. classshadow = self while classshadow is not None: if classshadow is s_superclass: @@ -328,7 +343,7 @@ w_methoddict = model.W_PointersObject(self.space, None, 2) w_methoddict._store(self.space, 1, model.W_PointersObject(self.space, None, 0)) self._s_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) - self.s_methoddict().sync_cache() + self.s_methoddict().sync_method_cache() self.s_methoddict().invalid = False def installmethod(self, w_selector, w_method): @@ -340,7 +355,7 @@ if isinstance(w_method, model.W_CompiledMethod): s_method.w_compiledin = self.w_self() -class MethodDictionaryShadow(AbstractShadow): +class MethodDictionaryShadow(ListStorageShadow): _immutable_fields_ = ['invalid?', 's_class'] _attrs_ = ['methoddict', 'invalid', 's_class'] @@ -349,23 +364,21 @@ self.invalid = True self.s_class = None self.methoddict = {} - AbstractShadow.__init__(self, space, w_self) + ListStorageShadow.__init__(self, space, w_self, 0) def find_selector(self, w_selector): if self.invalid: return None # we may be invalid if Smalltalk code did not call flushCache return self.methoddict.get(w_selector, None) - def update(self): return self.sync_cache() - # Remove update call for changes to ourselves: # Whenever a method is added, it's keyword is added to w_self, then the # w_compiled_method is added to our observee. - # Sync_cache at this point would not have the desired effect, because in - # the Smalltalk Implementation, the dictionary changes first. Afterwards - # its contents array is filled with the value belonging to the new key. + # sync_method_cache at this point would not have the desired effect, because in + # the Smalltalk Implementation, the dictionary changes first. Afterwards + # its contents array is filled with the value belonging to the new key. 
def store(self, n0, w_value): - AbstractShadow.store(self, n0, w_value) + ListStorageShadow.store(self, n0, w_value) self.invalid = True def _as_md_entry(self, w_selector): @@ -374,17 +387,17 @@ else: return "%r" % w_selector # use the pointer for this - def sync_cache(self): - if self.w_self().size() == 0: + def sync_method_cache(self): + if self.size() == 0: return - w_values = self.w_self()._fetch(self.space, constants.METHODDICT_VALUES_INDEX) + w_values = self.fetch(self.space, constants.METHODDICT_VALUES_INDEX) assert isinstance(w_values, model.W_PointersObject) s_values = w_values.as_observed_get_shadow(self.space) s_values.notify(self) - size = self.w_self().size() - constants.METHODDICT_NAMES_INDEX + size = self.size() - constants.METHODDICT_NAMES_INDEX self.methoddict = {} for i in range(size): - w_selector = self.w_self()._fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) + w_selector = self.w_self().fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) if not w_selector.is_same_object(self.space.w_nil): if not isinstance(w_selector, model.W_BytesObject): pass @@ -392,7 +405,7 @@ # Putting any key in the methodDict and running with # perform is actually supported in Squeak # raise ClassShadowError("bogus selector in method dict") - w_compiledmethod = w_values._fetch(self.space, i) + w_compiledmethod = w_values.fetch(self.space, i) if not isinstance(w_compiledmethod, model.W_CompiledMethod): raise ClassShadowError("The methoddict must contain " "CompiledMethods only, for now. " @@ -434,17 +447,6 @@ assert e.s_context == self w_self.initialize_storage(self.space, 0) - # def detach_shadow(self): - # w_self = self.w_self() - # assert isinstance(w_self, model.W_PointersObject) - # w_self._vars = [self.space.w_nil] * self._w_self_size - # for i in range(self._w_self_size): - # self.copy_to_w_self(i) - - def copy_from_w_self(self, n0): - self.store(n0, self.w_self()._fetch(self.space, n0)) - def copy_to_w_self(self, n0): - self.w_self()._store(self.space, n0, self.fetch(n0)) class ContextPartShadow(AbstractRedirectingShadow): @@ -1100,29 +1102,25 @@ @elidable_for_version def fetch(self, n0): - return self._w_self._fetch(self.space, n0) + return AbstractCachingShadow.fetch(self, n0) def store(self, n0, w_value): res = self._w_self._store(self.space, n0, w_value) self.changed() return res - def update(self): pass - -class ObserveeShadow(AbstractShadow): +class ObserveeShadow(ListStorageShadow): _attrs_ = ['dependent'] def __init__(self, space, w_self): - AbstractShadow.__init__(self, space, w_self) + ListStorageShadow.__init__(self, space, w_self, 0) self.dependent = None def store(self, n0, w_value): - AbstractShadow.store(self, n0, w_value) + ListStorageShadow.store(self, n0, w_value) self.dependent.update() def notify(self, dependent): if self.dependent is not None and dependent is not self.dependent: raise RuntimeError('Meant to be observed by only one value, so far') self.dependent = dependent - - def update(self): pass diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -230,7 +230,6 @@ self.init_g_objects() self.init_w_objects() self.fillin_w_objects() - self.synchronize_shadows() def read_version(self): # 1 word version @@ -288,8 +287,11 @@ if self.special_object(0).w_object is not self.space.w_nil: raise Warning('Object found in multiple places in the special objects array') # assign w_objects for objects that are already in classtable + import pdb; pdb.set_trace() for name, so_index in 
constants.classes_in_special_object_table.items(): w_object = self.space.classtable["w_" + name] + if not w_object: + import pdb; pdb.set_trace() if self.special_object(so_index).w_object is None: self.special_object(so_index).w_object = w_object else: @@ -302,13 +304,7 @@ def fillin_w_objects(self): for chunk in self.chunks.itervalues(): - chunk.g_object.w_object.fillin(self.space, chunk.g_object) - - def synchronize_shadows(self): - for chunk in self.chunks.itervalues(): - casted = chunk.g_object.w_object - if isinstance(casted, model.W_PointersObject) and casted.has_shadow(): - casted.shadow.update() + chunk.g_object.fillin(self.space) def init_compactclassesarray(self): """ from the blue book (CompiledMethod Symbol Array PseudoContext LargePositiveInteger nil MethodDictionary Association Point Rectangle nil TranslatedMethod BlockContext MethodContext nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil ) """ @@ -428,6 +424,7 @@ def __init__(self, space): self.space = space self.reader = None + self.filled_in = False def isinitialized(self): return self.reader is not None @@ -568,9 +565,20 @@ raise CorruptImageError("Expected %d words, got %d" % (required_len, len(words))) return words + def fillin(self, space): + if self == self.reader.special_object(6): + import pdb; pdb.set_trace() + + if not self.filled_in: + self.filled_in = True + self.w_object.fillin(space, self) + + def get_g_pointers(self): + assert self.pointers is not None + return self.pointers + def get_pointers(self): - assert self.pointers is not None - return [g_object.w_object for g_object in self.pointers] + return [g_object.w_object for g_object in self.get_g_pointers()] def get_class(self): w_class = self.g_class.w_object diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -7,7 +7,7 @@ from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.rfloat import string_to_float -class AbstractStorageStrategy(object): +class AbstractStorageStrategy(shadow.AbstractShadow): _immutable_fields_ = [] _attrs_ = [] _settled_ = True From noreply at buildbot.pypy.org Tue Mar 25 14:31:49 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 14:31:49 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Working on refactoring. Message-ID: <20140325133149.3B9021C309D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r694:20c045fd46e5 Date: 2014-03-24 18:31 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/20c045fd46e5/ Log: Working on refactoring. 
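The squeakimage.py hunks in the surrounding diffs drop the reader's separate synchronize_shadows() pass in favour of per-object fill-in guarded by a filled_in flag, so each g_object is materialized at most once and pointer cycles terminate naturally. A minimal sketch of that guard pattern, using simplified stand-in classes rather than the actual spyvm GenericObject/W_PointersObject API:

class GObjectSketch(object):
    """Stand-in for a not-yet-materialized object read from the image."""
    def __init__(self, pointers=()):
        self.pointers = list(pointers)   # other GObjectSketch instances
        self.filled_in = False
        self.fields = None               # materialized contents

    def fillin(self):
        # The flag is set *before* recursing, so an object reached again
        # through a pointer cycle returns immediately instead of looping.
        if self.filled_in:
            return self
        self.filled_in = True
        self.fields = [child.fillin() for child in self.pointers]
        return self

# In this toy version only the roots need an explicit call; everything
# reachable is filled in exactly once, with no extra pass over all chunks
# afterwards (the role synchronize_shadows() used to play).
if __name__ == "__main__":
    a = GObjectSketch()
    b = GObjectSketch([a])
    a.pointers.append(b)                 # create a cycle on purpose
    b.fillin()
    assert a.filled_in and b.filled_in

In the real reader, fillin_w_objects() still walks every chunk, but the guard makes repeated visits free and lets individual objects be pulled in early (fillin_nonpointers) without a later synchronization pass.
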
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -497,14 +497,23 @@ if not self.shadow: self.store_shadow(self.default_storage(space, size)) else: + from spyvm.shadow import ClassShadow + if isinstance(self.shadow, ClassShadow) and self.shadow.name == "BlockClosure": + import pdb; pdb.set_trace() + self.shadow.initialize_storage(space, size) def fillin(self, space, g_self): + g_self.g_class.fillin(space) self.s_class = g_self.get_class().as_class_get_penumbra(space) + + if self.s_class.name == "BlockClosure": + import pdb; pdb.set_trace() + self.hash = g_self.get_hash() self.space = space for g_obj in g_self.get_g_pointers(): - g_obj.fillin(space) + g_obj.fillin_nonpointers(space) pointers = g_self.get_pointers() self.initialize_storage(space, len(pointers)) self.store_all(space, pointers) @@ -550,6 +559,8 @@ return self.varsize(space) def size(self): + if not self.shadow: + return 0 return self._get_shadow().size() def store_shadow(self, shadow): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -36,6 +36,58 @@ return self._executable_path[0] def make_bootstrap_classes(self): + def define_cls(cls_nm, supercls_nm, instvarsize=0, format=shadow.POINTERS, + varsized=False): + assert cls_nm.startswith("w_") + self.classtable[cls_nm] = bootstrap_class(self, instvarsize, \ + None, + None, + format=format, + varsized=varsized, + name=cls_nm[2:]) + +#define_cls("w_Magnitude", "w_Object") + define_cls("w_Character", "w_Magnitude", instvarsize=1) +#define_cls("w_Number", "w_Magnitude") +#define_cls("w_Integer", "w_Number") + define_cls("w_SmallInteger", "w_Integer") + define_cls("w_LargePositiveInteger", "w_Integer", format=shadow.BYTES) + define_cls("w_Float", "w_Number", format=shadow.BYTES) + define_cls("w_Message", "w_Object") +#define_cls("w_Collection", "w_Object") +#define_cls("w_SequenceableCollection", "w_Collection") +#define_cls("w_ArrayedCollection", "w_SequenceableCollection") + define_cls("w_Array", "w_ArrayedCollection", varsized=True) + define_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES) + define_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) +#define_cls("w_UndefinedObject", "w_Object") +#define_cls("w_Boolean", "w_Object") +#define_cls("w_True", "w_Boolean") +#define_cls("w_False", "w_Boolean") + define_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES) + define_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD) +#define_cls("w_ContextPart", "w_Object") + define_cls("w_MethodContext", "w_ContextPart") +#define_cls("w_Link", "w_Object") + #define_cls("w_Process", "w_Link") + #define_cls("w_Point", "w_Object") +#define_cls("w_LinkedList", "w_SequenceableCollection") + define_cls("w_Semaphore", "w_LinkedList") + #define_cls("w_BlockContext", "w_ContextPart", instvarsize=constants.BLKCTX_STACK_START) + define_cls("w_BlockClosure", "w_Object", instvarsize=constants.BLKCLSR_SIZE, varsized=True) + + # make better accessors for classes that can be found in special object + # table + for name in constants.classes_in_special_object_table.keys(): + name = 'w_' + name + if name in self.classtable: + cls = self.classtable.get(name) + setattr(self, name, self.classtable.get(name)) + else: + # assert False, "Missing bootstrapped class from special objects array: %s" % (name,) + pass + + def _make_bootstrap_classes(self): def define_core_cls(name, w_superclass, w_metaclass): assert name.startswith('w_') w_class = 
bootstrap_class(self, instsize=0, # XXX @@ -106,42 +158,45 @@ varsized=varsized, name=cls_nm[2:]) - define_cls("w_Magnitude", "w_Object") + #define_cls("w_Magnitude", "w_Object") define_cls("w_Character", "w_Magnitude", instvarsize=1) - define_cls("w_Number", "w_Magnitude") - define_cls("w_Integer", "w_Number") + #define_cls("w_Number", "w_Magnitude") + #define_cls("w_Integer", "w_Number") define_cls("w_SmallInteger", "w_Integer") define_cls("w_LargePositiveInteger", "w_Integer", format=shadow.BYTES) define_cls("w_Float", "w_Number", format=shadow.BYTES) define_cls("w_Message", "w_Object") - define_cls("w_Collection", "w_Object") - define_cls("w_SequenceableCollection", "w_Collection") - define_cls("w_ArrayedCollection", "w_SequenceableCollection") + #define_cls("w_Collection", "w_Object") + #define_cls("w_SequenceableCollection", "w_Collection") + #define_cls("w_ArrayedCollection", "w_SequenceableCollection") define_cls("w_Array", "w_ArrayedCollection", varsized=True) define_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES) define_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) - define_cls("w_UndefinedObject", "w_Object") - define_cls("w_Boolean", "w_Object") - define_cls("w_True", "w_Boolean") - define_cls("w_False", "w_Boolean") + #define_cls("w_UndefinedObject", "w_Object") + #define_cls("w_Boolean", "w_Object") + #define_cls("w_True", "w_Boolean") + #define_cls("w_False", "w_Boolean") define_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES) - define_cls("w_MethodDict", "w_Object", instvarsize=2, varsized=True) define_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD) - define_cls("w_ContextPart", "w_Object") + #define_cls("w_ContextPart", "w_Object") define_cls("w_MethodContext", "w_ContextPart") - define_cls("w_Link", "w_Object") + #define_cls("w_Link", "w_Object") define_cls("w_Process", "w_Link") define_cls("w_Point", "w_Object") - define_cls("w_LinkedList", "w_SequenceableCollection") + #define_cls("w_LinkedList", "w_SequenceableCollection") define_cls("w_Semaphore", "w_LinkedList") define_cls("w_BlockContext", "w_ContextPart", instvarsize=constants.BLKCTX_STACK_START) define_cls("w_BlockClosure", "w_Object", instvarsize=constants.BLKCLSR_SIZE, varsized=True) + + # TODO - this class is not needed for the special objects array, so maybe not create it synthetically. 
+ #define_cls("w_MethodDict", "w_Object", instvarsize=2, varsized=True) + # make better accessors for classes that can be found in special object # table for name in constants.classes_in_special_object_table.keys(): name = 'w_' + name setattr(self, name, self.classtable.get(name)) - + def make_bootstrap_objects(self): def bld_char(i): w_cinst = self.w_Character.as_class_get_shadow(self).new() @@ -163,18 +218,22 @@ w_nil.initialize_storage(self, 0) w_nil.s_class = self.classtable['w_UndefinedObject'].as_class_get_penumbra(self) return w_nil - w_nil = self.w_nil = patch_nil(model.w_nil) + w_nil = self.w_nil = model.w_nil + # patch_nil(model.w_nil) - w_true = self.classtable['w_True'].as_class_get_shadow(self).new() + w_true = instantiate(model.W_PointersObject) + #self.classtable['w_True'].as_class_get_shadow(self).new() self.w_true = w_true - w_false = self.classtable['w_False'].as_class_get_shadow(self).new() + w_false = instantiate(model.W_PointersObject) + #self.classtable['w_False'].as_class_get_shadow(self).new() self.w_false = w_false self.w_minus_one = model.W_SmallInteger(-1) self.w_zero = model.W_SmallInteger(0) self.w_one = model.W_SmallInteger(1) self.w_two = model.W_SmallInteger(2) - w_special_selectors = model.W_PointersObject(self, - self.classtable['w_Array'], len(constants.SPECIAL_SELECTORS) * 2) + w_special_selectors = instantiate(model.W_PointersObject) + #model.W_PointersObject(self, + #self.classtable['w_Array'], len(constants.SPECIAL_SELECTORS) * 2) self.w_special_selectors = w_special_selectors self.objtable = {} @@ -252,6 +311,7 @@ return intmask(w_value.value) else: raise UnwrappingError("The value is negative when interpreted as 32bit value.") + import pdb; pdb.set_trace() raise UnwrappingError("expected a W_SmallInteger or W_LargePositiveInteger1Word, got %s" % (w_value,)) def unwrap_uint(self, w_value): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -16,13 +16,10 @@ self.space = space self._w_self = w_self def fetch(self, n0): - import pdb; pdb.set_trace() raise NotImplementedError("Abstract class") def store(self, n0, w_value): - import pdb; pdb.set_trace() raise NotImplementedError("Abstract class") def size(self): - import pdb; pdb.set_trace() raise NotImplementedError("Abstract class") def w_self(self): return self._w_self @@ -118,13 +115,8 @@ def copy_from(self, other_storage): AbstractCachingShadow.copy_from(self, other_storage) - if not self._s_methoddict: - import pdb; pdb.set_trace() def store(self, n0, w_val): - if self.name == "String": - import pdb; pdb.set_trace() - AbstractCachingShadow.store(self, n0, w_val) if n0 == constants.CLASS_SUPERCLASS_INDEX: self.store_w_superclass(w_val) @@ -176,20 +168,6 @@ raise ClassShadowError("unknown format %d" % (format,)) elif n0 == constants.CLASS_NAME_INDEX: self.store_w_name(w_val) - elif n0 == (self.size() - 1): - # Some heuristic to find the classname - # Only used for debugging - # XXX This is highly experimental XXX - # if the name-pos of class is not bytesobject, - # we are probably holding a metaclass instead of a class. - # metaclasses hold a pointer to the real class in the last - # slot. This is pos 6 in mini.image and higher in squeak3.9 - if (isinstance(w_val, model.W_PointersObject) - and w_val.size() > constants.CLASS_NAME_INDEX): - # TODO ADD TEST WHICH GOES OVER THIS PART - self.store_w_name(w_realclass.fetch(constants.CLASS_NAME_INDEX)) - else: - return else: return # Some of the special info has changed -> Switch version. 
@@ -219,6 +197,8 @@ self.name = w_name.as_string() else: self.name = None + if self.name == "BlockClosure": + import pdb; pdb.set_trace() @jit.unroll_safe def flush_method_caches(self): @@ -256,8 +236,6 @@ return self._s_methoddict.w_self() def s_methoddict(self): - if not hasattr(self, "_s_methoddict"): - import pdb; pdb.set_trace() return self._s_methoddict def s_superclass(self): diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -278,26 +278,29 @@ chunk.g_object.init_w_object() def assign_prebuilt_constants(self): - # assign w_objects for objects that are already in objtable - for name, so_index in constants.objects_in_special_object_table.items(): - w_object = self.space.objtable["w_" + name] + # Assign classes and objects that in special objects array that are already created. + self._assign_prebuilt_constants(constants.objects_in_special_object_table, self.space.objtable, False) + self._assign_prebuilt_constants(constants.classes_in_special_object_table, self.space.classtable, False) + + # Make sure that all prebuilt classes are actually used in the special classes array + for prebuilt_classname in self.space.classtable.keys(): + prebuilt_classname = prebuilt_classname[2:] + assert prebuilt_classname in constants.classes_in_special_object_table.keys(), \ + "Prebuilt class is not used in the special objects array: %s" % (prebuilt_classname,) + + def _assign_prebuilt_constants(self, names_and_indices, prebuilt_objects, force_existance=False): + for name, so_index in names_and_indices.items(): + name = "w_" + name + if name not in prebuilt_objects and not force_existance: + continue + w_object = prebuilt_objects[name] + assert not force_existance or w_object if self.special_object(so_index).w_object is None: self.special_object(so_index).w_object = w_object else: if self.special_object(0).w_object is not self.space.w_nil: raise Warning('Object found in multiple places in the special objects array') - # assign w_objects for objects that are already in classtable - import pdb; pdb.set_trace() - for name, so_index in constants.classes_in_special_object_table.items(): - w_object = self.space.classtable["w_" + name] - if not w_object: - import pdb; pdb.set_trace() - if self.special_object(so_index).w_object is None: - self.special_object(so_index).w_object = w_object - else: - if self.special_object(0).w_object is not self.space.w_nil: - raise Warning('Object found in multiple places in the special objects array') - + def special_object(self, index): special = self.chunks[self.specialobjectspointer].g_object.pointers return special[index] @@ -439,6 +442,7 @@ w_int = self.space.wrap_int(value) reader.intcache[value] = w_int self.w_object = w_int + self.filled_in = True def initialize(self, chunk, reader): self.reader = reader @@ -565,10 +569,11 @@ raise CorruptImageError("Expected %d words, got %d" % (required_len, len(words))) return words + def fillin_nonpointers(self, space): + if not self.filled_in and (self.isbytes() or self.iswords()): + self.fillin(space) + def fillin(self, space): - if self == self.reader.special_object(6): - import pdb; pdb.set_trace() - if not self.filled_in: self.filled_in = True self.w_object.fillin(space, self) From noreply at buildbot.pypy.org Tue Mar 25 14:31:53 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 14:31:53 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Compiling and working. 
Message-ID: <20140325133153.1E40B1C309D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r695:58619053a3d9 Date: 2014-03-25 14:30 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/58619053a3d9/ Log: Compiling and working. Removed a lot of the bootstrap code in objspace. Strategies are removed for now, will add after refactoring. Will fix tests next. diff too long, truncating to 2000 out of 307516 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. 
The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! 
!Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. 
^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! 
Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." 
| methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! 
category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! 
setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! 
transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! 
!Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." 
| symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. 
classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... 
export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." | oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! 
superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. (aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. "Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! 
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. ^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. 
(blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! 
interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! 
removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. 
[tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! 
!Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. 
self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. 
"Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). "Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. 
"Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! 
From noreply at buildbot.pypy.org Tue Mar 25 15:02:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 15:02:01 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/4d330c8e6b92 Message-ID: <20140325140201.70ED81C30DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70276:ade60393992a Date: 2014-03-25 14:18 +0100 http://bitbucket.org/pypy/pypy/changeset/ade60393992a/ Log: import stmgc/4d330c8e6b92 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -5575626c8253 +4d330c8e6b92 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -581,6 +581,16 @@ */ struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num); + switch (pseg->transaction_state) { + case TS_REGULAR: + break; + case TS_INEVITABLE: + stm_fatalerror("abort: transaction_state == TS_INEVITABLE"); + default: + stm_fatalerror("abort: bad transaction_state == %d", + (int)pseg->transaction_state); + } + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -606,15 +616,6 @@ assert(_has_mutex()); dprintf(("~~~ ABORT\n")); - switch (STM_PSEGMENT->transaction_state) { - case TS_REGULAR: - break; - case TS_INEVITABLE: - stm_fatalerror("abort: transaction_state == TS_INEVITABLE"); - default: - stm_fatalerror("abort: bad transaction_state == %d", - (int)STM_PSEGMENT->transaction_state); - } assert(STM_PSEGMENT->running_pthread == pthread_self()); abort_data_structures_from_segment_num(STM_SEGMENT->segment_num); @@ -677,9 +678,10 @@ s_mutex_unlock(); } -void stm_become_globally_unique_transaction(const char *msg) +void stm_become_globally_unique_transaction(stm_thread_local_t *tl, + const char *msg) { - stm_become_inevitable(msg); /* may still abort */ + stm_become_inevitable(tl, msg); /* may still abort */ s_mutex_lock(); synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE); diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -61,7 +61,7 @@ bool was_in_transaction = _stm_in_transaction(this_tl); if (was_in_transaction) { - stm_become_inevitable("fork"); + stm_become_inevitable(this_tl, "fork"); /* Note that the line above can still fail and abort, which should be fine */ } @@ -191,6 +191,8 @@ #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif + pr->pub.running_thread->shadowstack = ( + pr->shadowstack_at_start_of_transaction); stm_abort_transaction(); } } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -276,7 +276,9 @@ /* Turn the current transaction inevitable. The 'jmpbuf' passed to STM_START_TRANSACTION() is not going to be used any more after this call (but the stm_become_inevitable() itself may still abort). 
*/ -static inline void stm_become_inevitable(const char* msg) { +static inline void stm_become_inevitable(stm_thread_local_t *tl, + const char* msg) { + assert(STM_SEGMENT->running_thread == tl); if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } @@ -331,7 +333,8 @@ transaction is running concurrently. Avoid as much as possible. Other transactions will continue running only after this transaction commits. */ -void stm_become_globally_unique_transaction(const char *msg); +void stm_become_globally_unique_transaction(stm_thread_local_t *tl, + const char *msg); /* ==================== END ==================== */ From noreply at buildbot.pypy.org Tue Mar 25 15:02:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 15:02:02 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: add the extra argument Message-ID: <20140325140202.C00711C30DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70277:13baeba618bc Date: 2014-03-25 14:32 +0100 http://bitbucket.org/pypy/pypy/changeset/13baeba618bc/ Log: add the extra argument diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -127,10 +127,11 @@ except IndexError: info = "rstm.become_inevitable" # cannot insert it in 'llop' string_literal = c_string_constant(info) - return 'stm_become_inevitable(%s);' % (string_literal,) + return 'stm_become_inevitable(&stm_thread_local, %s);' % (string_literal,) def stm_become_globally_unique_transaction(funcgen, op): - return 'stm_become_globally_unique_transaction("for the JIT");' + return ('stm_become_globally_unique_transaction(&stm_thread_local,' + ' "for the JIT");') def stm_push_root(funcgen, op): arg0 = funcgen.expr(op.args[0]) @@ -276,14 +277,6 @@ ## return '%s = stm_pointer_equal((gcptr)%s, (gcptr)%s);' % ( ## result, args[0], args[1]) -##def stm_become_inevitable(funcgen, op): -## try: -## info = op.args[0].value -## except IndexError: -## info = "rstm.become_inevitable" # cannot insert it in 'llop' -## string_literal = c_string_constant(info) -## return 'stm_become_inevitable(%s);' % (string_literal,) - ##def stm_stop_all_other_threads(funcgen, op): ## return 'stm_stop_all_other_threads();' diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -23,7 +23,8 @@ stm_commit_transaction(); } else { - stm_become_inevitable("commit_if_not_atomic in atomic"); + stm_become_inevitable(&stm_thread_local, + "commit_if_not_atomic in atomic"); } errno = e; } From noreply at buildbot.pypy.org Tue Mar 25 15:02:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 15:02:04 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix by a quick hack (that can be justified too): give transactionsafe=True Message-ID: <20140325140204.0DC911C30DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70278:6d63887bcf1a Date: 2014-03-25 15:01 +0100 http://bitbucket.org/pypy/pypy/changeset/6d63887bcf1a/ Log: Fix by a quick hack (that can be justified too): give transactionsafe=True to the calls that actually occur outside a transaction. 
diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -127,6 +127,12 @@ has_callback) # because the callback can do it assert not (elidable_function and random_effects_on_gcobjs) + if not _nowrapper and invoke_around_handlers: + # enable 'transactionsafe' so that the call to funcptr, which is + # really done outside a transaction, doesn't force stm/inevitable.py + # to insert a spurious stm_become_inevitable() + transactionsafe = True + funcptr = lltype.functionptr(ext_type, name, external='C', transactionsafe=transactionsafe, compilation_info=compilation_info, From noreply at buildbot.pypy.org Tue Mar 25 15:35:28 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 25 Mar 2014 15:35:28 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Fixed tests (except test_strategies.py which is not functional). Message-ID: <20140325143528.DF59B1C066C@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r696:fdf0149a0aae Date: 2014-03-25 15:35 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/fdf0149a0aae/ Log: Fixed tests (except test_strategies.py which is not functional). Added BootstrappedObjSpace in util.py for tests. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -439,7 +439,7 @@ return self.w_class def __str__(self): - if isinstance(self, W_PointersObject) and self.has_shadow(): + if isinstance(self, W_PointersObject) and self.has_shadow() and self.shadow.has_getname: return self._get_shadow().getname() else: name = None @@ -627,7 +627,7 @@ def as_context_get_shadow(self, space): from spyvm.shadow import ContextPartShadow # XXX TODO should figure out itself if its method or block context - if self._get_shadow() is None: + if not isinstance(self.shadow, ContextPartShadow): if ContextPartShadow.is_block_context(self, space): return self.as_blockcontext_get_shadow(space) return self.as_methodcontext_get_shadow(space) diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -53,7 +53,7 @@ MaskTable.append(r_uint((2 ** (i + 1)) - 1)) AllOnes = r_uint(0xFFFFFFFF) - def sync_cache(self): + def attach_shadow(self): pass def intOrIfNil(self, w_int, i): @@ -734,7 +734,7 @@ def intOrIfNil(self, w_int, i): return intOrIfNil(self.space, w_int, i) - def sync_cache(self): + def attach_shadow(self): self.invalid = True if self.size() < 5: return @@ -756,8 +756,8 @@ w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: - self.offsetX = self.intOrIfNil(w_offset._fetch(self.space, 0), 0) - self.offsetY = self.intOrIfNil(w_offset._fetch(self.space, 1), 0) + self.offsetX = self.intOrIfNil(w_offset.fetch(self.space, 0), 0) + self.offsetY = self.intOrIfNil(w_offset.fetch(self.space, 1), 0) self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() < (self.pitch * self.height): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -11,7 +11,8 @@ can be attached at run-time to any Smalltalk object. 
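The new test helper module referenced throughout the "Fixed tests" changeset above, spyvm/test/util.py, is not itself shown in the quoted hunks. Going only by the imports and call sites that are visible in the diff (tests now do `space = BootstrappedObjSpace()` where they previously did `objspace.ObjSpace()`, and call `bootstrap_class(space, instsize, ...)` with the same keyword signature the old `objspace.bootstrap_class` helper had), a minimal sketch of what util.py would need to export could look like the following. This is an assumption for illustration, not the file from the commit: the names `BootstrappedObjSpace` and `bootstrap_class` and their signatures are taken from the diff, but the bodies assume `objspace.bootstrap_class` and `objspace.ObjSpace` keep their previous interfaces, and the real util.py on the storage branch may do additional image-independent bootstrapping.

    # spyvm/test/util.py -- hypothetical sketch only, reconstructed from the
    # import statements and call sites in the diff above.
    from spyvm import objspace, shadow

    def bootstrap_class(space, instsize, w_superclass=None, w_metaclass=None,
                        name='?', format=shadow.POINTERS, varsized=True):
        # Build a throwaway class for tests; same keyword signature the old
        # objspace.bootstrap_class helper exposed (assumed still available).
        return objspace.bootstrap_class(space, instsize, w_superclass,
                                        w_metaclass, name, format, varsized)

    class BootstrappedObjSpace(objspace.ObjSpace):
        # Drop-in replacement for the bare ObjSpace() the tests constructed
        # before; a place to hang whatever extra setup the storage branch's
        # tests need without an image being loaded.
        pass

Under these assumptions a test module would start with `space = BootstrappedObjSpace()` and create ad-hoc classes via `bootstrap_class(space, 0)` or `bootstrap_class(space, 3, name='fakeclass', varsized=True)`, which matches the rewritten call sites in test_model.py and test_interpreter.py above.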
""" _attrs_ = ['_w_self', 'space'] - + has_getname = True + def __init__(self, space, w_self): self.space = space self._w_self = w_self @@ -40,6 +41,7 @@ class ListStorageShadow(AbstractShadow): _attrs_ = ['storage'] + has_getname = False def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -60,6 +62,7 @@ class WeakListStorageShadow(AbstractShadow): _attrs_ = ['storage'] + has_getname = False def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -79,6 +82,7 @@ _attrs_ = ['version'] import_from_mixin(version.VersionMixin) version = None + has_getname = True def __init__(self, space, w_self): ListStorageShadow.__init__(self, space, w_self, 0) @@ -337,7 +341,7 @@ "NOT_RPYTHON" # this is only for testing. if self._s_methoddict is None: w_methoddict = model.W_PointersObject(self.space, None, 2) - w_methoddict._store(self.space, 1, model.W_PointersObject(self.space, None, 0)) + w_methoddict.store(self.space, 1, model.W_PointersObject(self.space, None, 0)) self._s_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) self.s_methoddict().sync_method_cache() self.s_methoddict().invalid = False @@ -450,6 +454,12 @@ AbstractRedirectingShadow.__init__(self, space, w_self) self.instances_w = {} + def copy_field_from(self, n0, other_shadow): + try: + AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) + except error.SenderChainManipulation, e: + assert e.s_context == self + def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. privileged_fields = self.fields_to_copy_first() @@ -463,6 +473,9 @@ if n0 not in privileged_fields: self.copy_field_from(n0, other_shadow) + def fields_to_copy_first(self): + return [] + @staticmethod def is_block_context(w_pointers, space): method_or_argc = w_pointers.fetch(space, constants.MTHDCTX_METHOD) diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -13,15 +13,12 @@ from rpython.jit.metainterp.test.test_ajit import LLJitMixin - +from .util import bootstrap_class from spyvm import model, interpreter, primitives, shadow from spyvm import objspace, squeakimage from spyvm.tool.analyseimage import create_squeakimage, create_testimage from rpython.rlib.streamio import open_file_as_stream - -mockclass = objspace.bootstrap_class - space = objspace.ObjSpace() # expose the bytecode's values as global constants. 
diff --git a/spyvm/test/test_bitblt.py b/spyvm/test/test_bitblt.py --- a/spyvm/test/test_bitblt.py +++ b/spyvm/test/test_bitblt.py @@ -1,7 +1,8 @@ from spyvm import model, shadow, constants, interpreter, objspace from spyvm.plugins import bitblt +from .util import BootstrappedObjSpace -space = objspace.ObjSpace() +space = BootstrappedObjSpace() # copy from test_miniimage def w(any): diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -1,6 +1,6 @@ import py from spyvm import squeakimage, model, constants -from spyvm import interpreter, shadow, objspace +from spyvm import interpreter, shadow from spyvm.test import test_miniimage as tools from spyvm.test.test_miniimage import perform, w @@ -10,7 +10,7 @@ def find_symbol_in_methoddict_of(string, s_class): s_methoddict = s_class.s_methoddict() - s_methoddict.sync_cache() + s_methoddict.sync_method_cache() methoddict_w = s_methoddict.methoddict for each in methoddict_w.keys(): if each.as_string() == string: diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,13 +1,15 @@ import py +from .util import bootstrap_class as _bootstrap_class from spyvm import model, interpreter, primitives, shadow from spyvm import objspace, wrapper, constants +from .util import BootstrappedObjSpace -def mockclass(space, instsize, w_superclass=None, w_metaclass=None, +def bootstrap_class(space, instsize, w_superclass=None, w_metaclass=None, name='?', format=shadow.POINTERS, varsized=True): - return objspace.bootstrap_class(space, instsize, w_superclass, w_metaclass, + return _bootstrap_class(space, instsize, w_superclass, w_metaclass, name, format, varsized) -space = objspace.ObjSpace() +space = BootstrappedObjSpace() interp = interpreter.Interpreter(space) def step_in_interp(ctxt): # due to missing resets in between tests interp._loop = False @@ -56,16 +58,12 @@ space.w_special_selectors.atput0(space, index, symbol) assert space.get_special_selector(methname) is symbol s_class.installmethod(symbol, prim_meth) - - assert space.w_nil.shadow is None try: func(active_context) if active_context else func() finally: # Uninstall those methods: - assert space.w_nil.shadow is None for (w_class, _, _, methname) in methods: s_class = w_class.as_class_get_shadow(space) - s_class.update() s_class.s_methoddict().update() def fakesymbol(s, _cache={}): @@ -147,7 +145,7 @@ def test_pushReceiverVariableBytecode(bytecode = (pushReceiverVariableBytecode(0) + pushReceiverVariableBytecode(1) + pushReceiverVariableBytecode(2))): - w_demo = mockclass(space, 3).as_class_get_shadow(space).new() + w_demo = bootstrap_class(space, 3).as_class_get_shadow(space).new() w_demo.store(space, 0, "egg") w_demo.store(space, 1, "bar") w_demo.store(space, 2, "baz") @@ -182,7 +180,7 @@ fakesymbol("c")] def test_pushLiteralVariableBytecode(bytecode=pushLiteralVariableBytecode(0)): - w_association = mockclass(space, 2).as_class_get_shadow(space).new() + w_association = bootstrap_class(space, 2).as_class_get_shadow(space).new() w_association.store(space, 0, "mykey") w_association.store(space, 1, "myvalue") w_frame, s_frame = new_frame(bytecode) @@ -192,7 +190,7 @@ def test_storeAndPopReceiverVariableBytecode(bytecode=storeAndPopReceiverVariableBytecode, popped=True): - shadow = mockclass(space, 8).as_class_get_shadow(space) + shadow = bootstrap_class(space, 
8).as_class_get_shadow(space) for index in range(8): w_object = shadow.new() w_frame, s_frame = new_frame(pushConstantTrueBytecode + bytecode(index)) @@ -365,8 +363,8 @@ assert s_frame.stack() == [] def test_bytecodePrimNew(): - w_fakeclassclass = mockclass(space, 10, name='fakeclassclass') - w_fakeclass = mockclass(space, 1, name='fakeclass', varsized=False, + w_fakeclassclass = bootstrap_class(space, 10, name='fakeclassclass') + w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=False, w_metaclass=w_fakeclassclass) w_frame, s_frame = new_frame(bytecodePrimNew) s_frame.push(w_fakeclass) @@ -380,8 +378,8 @@ assert w_fakeinst.size() == 1 def test_bytecodePrimNewWithArg(): - w_fakeclassclass = mockclass(space, 10, name='fakeclassclass') - w_fakeclass = mockclass(space, 1, name='fakeclass', varsized=True, + w_fakeclassclass = bootstrap_class(space, 10, name='fakeclassclass') + w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=True, w_metaclass=w_fakeclassclass) w_frame, s_frame = new_frame(bytecodePrimNewWithArg) s_frame.push(w_fakeclass) @@ -396,7 +394,7 @@ assert w_fakeinst.size() == 3 def test_bytecodePrimSize(): - w_fakeclass = mockclass(space, 2, name='fakeclass', varsized=True) + w_fakeclass = bootstrap_class(space, 2, name='fakeclass', varsized=True) w_fakeinst = w_fakeclass.as_class_get_shadow(space).new(5) w_frame, s_frame = new_frame(bytecodePrimSize) s_frame.push(w_fakeinst) @@ -440,13 +438,13 @@ assert s_active_context.stack() == [result] def test_sendLiteralSelectorBytecode(): - w_class = mockclass(space, 0) + w_class = bootstrap_class(space, 0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, sendLiteralSelectorBytecode(0)) def test_fibWithArgument(): bytecode = ''.join(map(chr, [ 16, 119, 178, 154, 118, 164, 11, 112, 16, 118, 177, 224, 112, 16, 119, 177, 224, 176, 124 ])) - shadow = mockclass(space, 0).as_class_get_shadow(space) + shadow = bootstrap_class(space, 0).as_class_get_shadow(space) method = model.W_CompiledMethod(len(bytecode)) method.literalsize = 1 method.bytes = bytecode @@ -565,7 +563,7 @@ test_pushLiteralVariableBytecode(extendedPushBytecode + chr((3<<6) + 0)) def storeAssociation(bytecode): - w_association = mockclass(space, 2).as_class_get_shadow(space).new() + w_association = bootstrap_class(space, 2).as_class_get_shadow(space).new() w_association.store(space, 0, "mykey") w_association.store(space, 1, "myvalue") w_frame, s_frame = new_frame(pushConstantOneBytecode + bytecode) @@ -587,7 +585,7 @@ def test_callPrimitiveAndPush_fallback(): w_frame, s_frame = new_frame(bytecodePrimAdd) - shadow = mockclass(space, 0).as_class_get_shadow(space) + shadow = bootstrap_class(space, 0).as_class_get_shadow(space) w_method = model.W_CompiledMethod(0) w_method.argsize = 1 w_method.tempsize = 1 @@ -623,14 +621,14 @@ space.w_false, space.w_true] def test_singleExtendedSendBytecode(): - w_class = mockclass(space, 0) + w_class = bootstrap_class(space, 0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, singleExtendedSendBytecode + chr((0<<5)+0)) def test_singleExtendedSuperBytecode(bytecode=singleExtendedSuperBytecode + chr((0<<5) + 0)): - w_supersuper = mockclass(space, 0) - w_super = mockclass(space, 0, w_superclass=w_supersuper) - w_class = mockclass(space, 0, w_superclass=w_super) + w_supersuper = bootstrap_class(space, 0) + w_super = bootstrap_class(space, 0, w_superclass=w_supersuper) + w_class = bootstrap_class(space, 0, w_superclass=w_super) w_object = 
w_class.as_class_get_shadow(space).new() # first call method installed in w_class bytecodes = singleExtendedSendBytecode + chr(0) @@ -667,12 +665,12 @@ assert s_caller_context.stack() == [] def test_secondExtendedSendBytecode(): - w_class = mockclass(space, 0) + w_class = bootstrap_class(space, 0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, secondExtendedSendBytecode + chr(0)) def test_doubleExtendedDoAnythinBytecode(): - w_class = mockclass(space, 0) + w_class = bootstrap_class(space, 0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, doubleExtendedDoAnythingBytecode + chr((0<<5) + 0) + chr(0)) @@ -802,7 +800,7 @@ def test_bc_primBytecodeAt_with_instvars(): # ^ self at: 1 - w_fakeclass = mockclass(space, 1, name='fakeclass', varsized=True) + w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=True) w_fakeinst = w_fakeclass.as_class_get_shadow(space).new(1) w_fakeinst.store(space, 0, space.wrap_char("a")) # static slot 0: instance variable w_fakeinst.store(space, 1, space.wrap_char("b")) # varying slot 1 @@ -817,7 +815,7 @@ def test_bc_primBytecodeAtPut_with_instvars(): # ^ self at: 1 put: #b - w_fakeclass = mockclass(space, 1, name='fakeclass', varsized=True) + w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=True) w_fakeinst = w_fakeclass.as_class_get_shadow(space).new(1) w_fakeinst.store(space, 0, space.wrap_char("a")) # static slot 0: instance variable w_fakeinst.store(space, 1, space.wrap_char("a")) # varying slot 1 diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -1,7 +1,7 @@ import py import operator from spyvm import squeakimage, model, constants, error -from spyvm import interpreter, shadow, objspace, primitives +from spyvm import interpreter, shadow, primitives from spyvm.test import test_miniimage as tools from spyvm.test.test_miniimage import perform, w from spyvm.test.test_primitives import MockFrame @@ -10,10 +10,9 @@ space, interp = tools.setup_module(tools, filename='bootstrapped.image') - def find_symbol_in_methoddict_of(string, s_class): s_methoddict = s_class.s_methoddict() - s_methoddict.sync_cache() + s_methoddict.sync_method_cache() methoddict_w = s_methoddict.methoddict for each in methoddict_w.keys(): if each.as_string() == string: @@ -30,7 +29,7 @@ initialize_class(w("string").getclass(tools.space)) def perform_primitive(rcvr, w_selector, *args): - code = rcvr.getclass(space).shadow.lookup(w_selector).primitive() + code = rcvr.class_shadow(space).lookup(w_selector).primitive() assert code func = primitives.prim_holder.prim_table[code] s_frame = MockFrame([rcvr] + list(args)).as_context_get_shadow(space) diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -2,16 +2,12 @@ # NOT relying on order of methods # using setup_module(module) now import py -from spyvm import squeakimage -from spyvm import model -from spyvm import constants -from spyvm import interpreter -from spyvm import shadow -from spyvm import objspace +from spyvm import squeakimage, model, constants, interpreter, shadow, objspace +from .util import BootstrappedObjSpace # lazy initialization of test data, ie ImageReader and Float class def setup_module(module, filename='mini.image'): - space = objspace.ObjSpace() + space = BootstrappedObjSpace() from spyvm.tool.analyseimage import image_dir 
module.mini_image = image_dir.join(filename) module.reader = open_miniimage(space) @@ -113,27 +109,21 @@ assert str(w0) == "a ProcessorScheduler" def test_special_classes0(): + def test_classname(so_index, expected_name): + obj = image.special(so_index) + obj.as_class_get_shadow(space) + assert str(obj) == expected_name image = get_image() # w = image.special(constants.SO_BITMAP_CLASS) # assert str(w) == "Bitmap class" - w = image.special(constants.SO_SMALLINTEGER_CLASS) - assert str(w) == "SmallInteger class" - w = image.special(constants.SO_STRING_CLASS) - assert str(w) == "String class" - w = image.special(constants.SO_ARRAY_CLASS) - assert str(w) == "Array class" - w = image.special(constants.SO_FLOAT_CLASS) - assert str(w) == "Float class" - w = image.special(constants.SO_METHODCONTEXT_CLASS) - assert str(w) == "MethodContext class" - w = image.special(constants.SO_BLOCKCONTEXT_CLASS) - assert str(w) == "BlockContext class" - w = image.special(constants.SO_POINT_CLASS) - assert str(w) == "Point class" - w = image.special(constants.SO_LARGEPOSITIVEINTEGER_CLASS) - assert str(w) == "LargePositiveInteger class" - w = image.special(constants.SO_MESSAGE_CLASS) - assert str(w) == "Message class" + test_classname(constants.SO_SMALLINTEGER_CLASS, "SmallInteger class") + test_classname(constants.SO_ARRAY_CLASS, "Array class") + test_classname(constants.SO_FLOAT_CLASS, "Float class") + test_classname(constants.SO_METHODCONTEXT_CLASS, "MethodContext class") + test_classname(constants.SO_BLOCKCONTEXT_CLASS, "BlockContext class") + test_classname(constants.SO_POINT_CLASS, "Point class") + test_classname(constants.SO_LARGEPOSITIVEINTEGER_CLASS, "LargePositiveInteger class") + test_classname(constants.SO_MESSAGE_CLASS, "Message class") # to be continued @@ -406,7 +396,7 @@ from spyvm import primitives w_o = space.wrap_list([1, 2, 3]) w_methoddict = w_o.class_shadow(space)._s_superclass._s_superclass.w_methoddict() - w_methoddict.as_methoddict_get_shadow(space).sync_cache() + w_methoddict.as_methoddict_get_shadow(space).sync_method_cache() selectors_w = w_methoddict.shadow.methoddict.keys() w_sel = None for sel in selectors_w: diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -1,14 +1,13 @@ import py import math import socket +from .util import bootstrap_class, BootstrappedObjSpace from spyvm import model, shadow from spyvm.shadow import MethodNotFound from spyvm import objspace, error, display from rpython.rlib.rarithmetic import intmask, r_uint -mockclass = objspace.bootstrap_class - -space = objspace.ObjSpace() +space = BootstrappedObjSpace() w_foo = space.wrap_string("foo") w_bar = space.wrap_string("bar") @@ -21,14 +20,14 @@ def test_new(): - w_mycls = mockclass(space, 0) + w_mycls = bootstrap_class(space, 0) w_myinstance = w_mycls.as_class_get_shadow(space).new() assert isinstance(w_myinstance, model.W_PointersObject) assert w_myinstance.getclass(space).is_same_object(w_mycls) assert w_myinstance.class_shadow(space) is w_mycls.as_class_get_shadow(space) def test_new_namedvars(): - w_mycls = mockclass(space, 3) + w_mycls = bootstrap_class(space, 3) w_myinstance = w_mycls.as_class_get_shadow(space).new() assert isinstance(w_myinstance, model.W_PointersObject) assert w_myinstance.getclass(space).is_same_object(w_mycls) @@ -38,7 +37,7 @@ assert w_myinstance.fetch(space, 1) is w_myinstance def test_bytes_object(): - w_class = mockclass(space, 0, format=shadow.BYTES) + w_class = bootstrap_class(space, 0, 
format=shadow.BYTES) w_bytes = w_class.as_class_get_shadow(space).new(20) assert w_bytes.getclass(space).is_same_object(w_class) assert w_bytes.size() == 20 @@ -50,7 +49,7 @@ py.test.raises(IndexError, lambda: w_bytes.getchar(20)) def test_c_bytes_object(): - w_class = mockclass(space, 0, format=shadow.BYTES) + w_class = bootstrap_class(space, 0, format=shadow.BYTES) w_bytes = w_class.as_class_get_shadow(space).new(20) w_bytes.convert_to_c_layout() assert w_bytes.getclass(space).is_same_object(w_class) @@ -63,7 +62,7 @@ py.test.raises(IndexError, lambda: w_bytes.getchar(20)) def test_word_object(): - w_class = mockclass(space, 0, format=shadow.WORDS) + w_class = bootstrap_class(space, 0, format=shadow.WORDS) w_bytes = w_class.as_class_get_shadow(space).new(20) assert w_bytes.getclass(space).is_same_object(w_class) assert w_bytes.size() == 20 @@ -75,7 +74,7 @@ py.test.raises(AssertionError, lambda: w_bytes.getword(20)) def test_c_word_object(): - w_class = mockclass(space, 0, format=shadow.WORDS) + w_class = bootstrap_class(space, 0, format=shadow.WORDS) w_bytes = w_class.as_class_get_shadow(space).new(20) w_bytes.convert_to_c_layout() assert w_bytes.getclass(space).is_same_object(w_class) @@ -93,11 +92,11 @@ self.val = val def as_compiledmethod_get_shadow(self, space): return self.val - w_class = mockclass(space, mockmethod(0)) + w_class = bootstrap_class(space, mockmethod(0)) shadow = w_class.as_class_get_shadow(space) shadow.installmethod(w_foo, mockmethod(1)) shadow.installmethod(w_bar, mockmethod(2)) - w_subclass = mockclass(space, 0, w_superclass=w_class) + w_subclass = bootstrap_class(space, 0, w_superclass=w_class) subshadow = w_subclass.as_class_get_shadow(space) assert subshadow.s_superclass() is shadow subshadow.installmethod(w_foo, mockmethod(3)) @@ -111,8 +110,8 @@ py.test.raises(MethodNotFound, subshadow.lookup, "zork") def test_w_compiledin(): - w_super = mockclass(space, 0) - w_class = mockclass(space, 0, w_superclass=w_super) + w_super = bootstrap_class(space, 0) + w_class = bootstrap_class(space, 0, w_superclass=w_super) supershadow = w_super.as_class_get_shadow(space) supershadow.installmethod(w_foo, model.W_CompiledMethod(0)) classshadow = w_class.as_class_get_shadow(space) @@ -127,7 +126,7 @@ def test_hashes(): w_five = model.W_SmallInteger(5) assert w_five.gethash() == 5 - w_class = mockclass(space, 0) + w_class = bootstrap_class(space, 0) w_inst = w_class.as_class_get_shadow(space).new() assert w_inst.hash == w_inst.UNASSIGNED_HASH h1 = w_inst.gethash() @@ -211,10 +210,10 @@ test_not_is_same_object(space.wrap_char('d'), space.wrap_float(3.0)) def test_become_pointers(): - w_clsa = mockclass(space, 3) + w_clsa = bootstrap_class(space, 3) w_a = w_clsa.as_class_get_shadow(space).new() - w_clsb = mockclass(space, 4) + w_clsb = bootstrap_class(space, 4) w_b = w_clsb.as_class_get_shadow(space).new() hasha = w_a.gethash() @@ -235,9 +234,9 @@ assert w_a.fetch(space, 1) is w_a def test_become_with_shadow(): - w_clsa = mockclass(space, 3) + w_clsa = bootstrap_class(space, 3) s_clsa = w_clsa.as_class_get_shadow(space) - w_clsb = mockclass(space, 4) + w_clsb = bootstrap_class(space, 4) s_clsb = w_clsb.as_class_get_shadow(space) res = w_clsa.become(w_clsb) assert res @@ -399,7 +398,7 @@ def test_weak_pointers(): from spyvm.shadow import WEAK_POINTERS - w_cls = mockclass(space, 1) + w_cls = bootstrap_class(space, 1) s_cls = w_cls.as_class_get_shadow(space) s_cls.instance_kind = WEAK_POINTERS diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- 
a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -1,8 +1,9 @@ import py import sys from spyvm import objspace +from .util import BootstrappedObjSpace -space = objspace.ObjSpace() +space = BootstrappedObjSpace() def ismetaclass(w_cls): # Heuristic to detect if this is a metaclass. Don't use apart @@ -12,7 +13,7 @@ def test_every_class_is_an_instance_of_a_metaclass(): for (nm, w_cls) in space.classtable.items(): - assert ismetaclass(w_cls) or ismetaclass(w_cls.s_class._w_self) + assert ismetaclass(w_cls) or ismetaclass(w_cls.w_class) def test_every_metaclass_inherits_from_class_and_behavior(): s_Class = space.classtable['w_Class'].as_class_get_shadow(space) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -1,22 +1,21 @@ import py import os import math +from .util import bootstrap_class from spyvm.primitives import prim_table, PrimitiveFailedError -from spyvm import model, shadow, interpreter, strategies +from spyvm import model, shadow, interpreter from spyvm import constants, primitives, objspace, wrapper, display from spyvm.plugins import bitblt +from .util import BootstrappedObjSpace from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan -mockclass = objspace.bootstrap_class - -space = objspace.ObjSpace() +space = BootstrappedObjSpace() class MockFrame(model.W_PointersObject): def __init__(self, stack): self.space = space size = 6 + len(stack) + 6 - self.strategy = strategies.ListStorageStrategy.singleton self.initialize_storage(space, size) self.store_all(space, [None] * 6 + stack + [space.w_nil] * 6) s_self = self.as_blockcontext_get_shadow(space) @@ -27,7 +26,7 @@ self.w_class = space.w_MethodContext def as_blockcontext_get_shadow(self, space): - if not self.shadow: + if not isinstance(self.shadow, shadow.BlockContextShadow): self.shadow = shadow.BlockContextShadow(space, self) return self.shadow @@ -221,7 +220,7 @@ assert prim(primitives.FLOAT_TIMES_TWO_POWER, [213.0, 1020]).value == float('inf') def test_at(): - w_obj = mockclass(space, 0, varsized=True).as_class_get_shadow(space).new(1) + w_obj = bootstrap_class(space, 0, varsized=True).as_class_get_shadow(space).new(1) foo = wrap("foo") w_obj.store(space, 0, foo) assert prim(primitives.AT, [w_obj, 1]) is foo @@ -232,11 +231,11 @@ assert prim(primitives.AT, [w_obj, 1]) == foo def test_invalid_at(): - w_obj = mockclass(space, 0).as_class_get_shadow(space).new() + w_obj = bootstrap_class(space, 0).as_class_get_shadow(space).new() prim_fails(primitives.AT, [w_obj, 1]) def test_at_put(): - w_obj = mockclass(space, 0, varsized=1).as_class_get_shadow(space).new(1) + w_obj = bootstrap_class(space, 0, varsized=1).as_class_get_shadow(space).new(1) assert prim(primitives.AT_PUT, [w_obj, 1, 22]).value == 22 assert prim(primitives.AT, [w_obj, 1]).value == 22 @@ -249,13 +248,13 @@ assert prim(primitives.AT, [w_str, 3]).value == ord('c') def test_invalid_at_put(): - w_obj = mockclass(space, 0).as_class_get_shadow(space).new() + w_obj = bootstrap_class(space, 0).as_class_get_shadow(space).new() prim_fails(primitives.AT_PUT, [w_obj, 1, 22]) def test_size(): - w_obj = mockclass(space, 0, varsized=True).as_class_get_shadow(space).new(0) + w_obj = bootstrap_class(space, 0, varsized=True).as_class_get_shadow(space).new(0) assert prim(primitives.SIZE, [w_obj]).value == 0 - w_obj = mockclass(space, 3, varsized=True).as_class_get_shadow(space).new(5) + w_obj = bootstrap_class(space, 3, 
varsized=True).as_class_get_shadow(space).new(5) assert prim(primitives.SIZE, [w_obj]).value == 5 def test_size_of_compiled_method(): @@ -279,7 +278,7 @@ prim_fails(primitives.OBJECT_AT, ["q", constants.CHARACTER_VALUE_INDEX+2]) def test_invalid_object_at_put(): - w_obj = mockclass(space, 1).as_class_get_shadow(space).new() + w_obj = bootstrap_class(space, 1).as_class_get_shadow(space).new() prim_fails(primitives.OBJECT_AT_PUT, [w_obj, 2, 42]) def test_string_at_put(): @@ -344,7 +343,7 @@ def test_as_oop(): # I checked potato, and that returns the hash for as_oop - w_obj = mockclass(space, 0).as_class_get_shadow(space).new() + w_obj = bootstrap_class(space, 0).as_class_get_shadow(space).new() w_obj.hash = 22 assert prim(primitives.AS_OOP, [w_obj]).value == 22 @@ -485,7 +484,7 @@ def test_new_method(): bytecode = ''.join(map(chr, [ 16, 119, 178, 154, 118, 164, 11, 112, 16, 118, 177, 224, 112, 16, 119, 177, 224, 176, 124 ])) - shadow = mockclass(space, 0).as_class_get_shadow(space) + shadow = bootstrap_class(space, 0).as_class_get_shadow(space) w_method = prim(primitives.NEW_METHOD, [space.w_CompiledMethod, len(bytecode), 1025]) assert w_method.literalat0(space, 0).value == 1025 assert w_method.literalsize == 2 @@ -497,7 +496,7 @@ assert w_v.bytes == list(IMAGENAME) def test_clone(): - w_obj = mockclass(space, 1, varsized=True).as_class_get_shadow(space).new(1) + w_obj = bootstrap_class(space, 1, varsized=True).as_class_get_shadow(space).new(1) w_obj.atput0(space, 0, space.wrap_int(1)) w_v = prim(primitives.CLONE, [w_obj]) assert space.unwrap_int(w_v.at0(space, 0)) == 1 @@ -812,7 +811,7 @@ try: monkeypatch.setattr(w_frame.shadow, "_sendSelfSelector", perform_mock) - monkeypatch.setattr(bitblt.BitBltShadow, "sync_cache", sync_cache_mock) + monkeypatch.setattr(bitblt.BitBltShadow, "attach_shadow", sync_cache_mock) with py.test.raises(CallCopyBitsSimulation): prim_table[primitives.BITBLT_COPY_BITS](interp, w_frame.as_context_get_shadow(space), argument_count-1) finally: diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -1,8 +1,8 @@ import random -from spyvm import model, shadow, constants, interpreter -from spyvm import objspace, strategies +from spyvm import model, shadow, constants, interpreter, objspace +from .util import BootstrappedObjSpace -space = objspace.ObjSpace() +space = BootstrappedObjSpace() w_Object = space.classtable['w_Object'] w_Metaclass = space.classtable['w_Metaclass'] @@ -42,7 +42,7 @@ w_class.store(space, constants.CLASS_FORMAT_INDEX, space.wrap_int(format)) if name is not None: w_class.store(space, constants.CLASS_NAME_INDEX, space.wrap_string(name)) - w_class.as_class_get_shadow(space).s_methoddict().sync_cache() + w_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return w_class def basicshape(name, format, kind, varsized, instsize): @@ -156,18 +156,18 @@ def assert_contains_nils(w_obj): for i in range(w_obj.size()): - assert model.w_nil == w_obj.strategy.fetch(i, space, w_obj) + assert model.w_nil == w_obj.fetch(space, i) def test_attach_mc(): w_m = method() w_object = methodcontext(pc=13, method=w_m) s_object = w_object.as_methodcontext_get_shadow(space) - assert_contains_nils(w_object) + assert s_object.fetch(1).value == 13 def test_attach_bc(): w_object = blockcontext(pc=13) s_object = w_object.as_blockcontext_get_shadow(space) - assert_contains_nils(w_object) + assert s_object.fetch(1).value == 13 def test_replace_to_bc(): w_object = blockcontext(pc=13) @@ -177,7 
+177,7 @@ assert ([s_newobject.fetch(i) for i in range(s_newobject.size())] == [s_object.fetch(i) for i in range(s_newobject.size())]) assert w_object.shadow is s_newobject - assert_contains_nils(w_object) + assert s_object.fetch(1).value == 13 def test_compiledmethodshadow(): from test_model import joinbits @@ -240,17 +240,17 @@ w_class = build_smalltalk_class("Demo", 0x90, methods=methods) s_class = w_class.as_class_get_shadow(space) s_methoddict = s_class.s_methoddict() - s_methoddict.sync_cache() + s_methoddict.sync_method_cache() i = 0 - key = s_methoddict.w_self()._fetch(s_methoddict.space, constants.METHODDICT_NAMES_INDEX+i) + key = s_methoddict.w_self().fetch(s_methoddict.space, constants.METHODDICT_NAMES_INDEX+i) while key is space.w_nil: i = i + 1 - key = s_methoddict.w_self()._fetch(s_methoddict.space, constants.METHODDICT_NAMES_INDEX+i) + key = s_methoddict.w_self().fetch(s_methoddict.space, constants.METHODDICT_NAMES_INDEX+i) assert (s_class.lookup(key) is foo.as_compiledmethod_get_shadow(space) or s_class.lookup(key) is bar.as_compiledmethod_get_shadow(space)) # change that entry - w_array = s_class.w_methoddict()._fetch(s_class.space, constants.METHODDICT_VALUES_INDEX) + w_array = s_class.w_methoddict().fetch(s_class.space, constants.METHODDICT_VALUES_INDEX) version = s_class.version w_array.atput0(space, i, baz) @@ -269,8 +269,8 @@ key = space.wrap_string('foo') s_md = w_parent.as_class_get_shadow(space).s_methoddict() - s_md.sync_cache() - w_ary = s_md._w_self._fetch(s_md.space, constants.METHODDICT_VALUES_INDEX) + s_md.sync_method_cache() + w_ary = s_md._w_self.fetch(s_md.space, constants.METHODDICT_VALUES_INDEX) s_md._w_self.atput0(space, 0, key) w_ary.atput0(space, 0, w_method) diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -2,10 +2,10 @@ from spyvm import squeakimage from spyvm.squeakimage import chrs2int, chrs2long, swapped_chrs2long from spyvm import objspace - +from .util import BootstrappedObjSpace from struct import pack -space = objspace.ObjSpace() +space = BootstrappedObjSpace() # ----- helpers ---------------------------------------------- diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -1,5 +1,5 @@ import py -from spyvm import wrapper, model, interpreter, objspace, strategies +from spyvm import wrapper, model, interpreter, strategies from spyvm.model import w_nil from spyvm.test import test_miniimage as tools from spyvm.error import WrapperException, FatalError diff --git a/spyvm/test/test_wrapper.py b/spyvm/test/test_wrapper.py --- a/spyvm/test/test_wrapper.py +++ b/spyvm/test/test_wrapper.py @@ -1,10 +1,10 @@ import py from spyvm import wrapper, model, interpreter, objspace from spyvm.error import WrapperException, FatalError - +from .util import BootstrappedObjSpace from spyvm.test.test_interpreter import new_frame as new_frame_tuple -space = objspace.ObjSpace() +space = BootstrappedObjSpace() def new_frame(): return new_frame_tuple("")[0] diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -13,7 +13,7 @@ def find_symbol_in_methoddict_of(string, s_class): s_methoddict = s_class.s_methoddict() - s_methoddict.sync_cache() + s_methoddict.sync_method_cache() methoddict_w = s_methoddict.methoddict for each in 
methoddict_w.keys(): if each.as_string() == string: diff --git a/spyvm/test/util.py b/spyvm/test/util.py new file mode 100644 --- /dev/null +++ b/spyvm/test/util.py @@ -0,0 +1,176 @@ +from spyvm import model, shadow, objspace, version, constants +from rpython.rlib.objectmodel import instantiate + +class BootstrappedObjSpace(objspace.ObjSpace): + + def make_bootstrap_classes(self): + def define_core_cls(name, w_superclass, w_metaclass): + assert name.startswith('w_') + w_class = bootstrap_class(self, instsize=0, # XXX + w_superclass=w_superclass, + w_metaclass=w_metaclass, + name=name[2:]) + self.classtable[name] = w_class + return w_class + + # A complete minimal setup (including Behavior) would look like this + # + # class: superclass: metaclass: + # ------------------- ------------------- ------------------- + # Object *nil Object class + # Behavior Object Behavior class + # ClassDescription Behavior ClassDescription class + # Class ClassDescription Class class + # Metaclass ClassDescription Metaclass class + # Object class *Class *Metaclass + # Behavior class Object class *Metaclass + # ClassDescription cl Behavior class *Metaclass + # Class class ClassDescription cl *Metaclass + # Metaclass class ClassDescription cl *Metaclass + + # Class Name Super class name + cls_nm_tbl = [ + ["w_Object", "w_ProtoObject"], # there is not ProtoObject in mini.image + ["w_Behavior", "w_Object"], + ["w_ClassDescription", "w_Behavior"], + ["w_Class", "w_ClassDescription"], + ["w_Metaclass", "w_ClassDescription"], + ] + define_core_cls("w_ProtoObjectClass", None, None) + w_ProtoObjectClass = self.classtable["w_ProtoObjectClass"] + define_core_cls("w_ProtoObject", None, w_ProtoObjectClass) + for (cls_nm, super_cls_nm) in cls_nm_tbl: + meta_nm = cls_nm + "Class" + meta_super_nm = super_cls_nm + "Class" + w_metacls = define_core_cls(meta_nm, self.classtable[meta_super_nm], None) + define_core_cls(cls_nm, self.classtable[super_cls_nm], w_metacls) + w_Class = self.classtable["w_Class"] + w_Metaclass = self.classtable["w_Metaclass"] + # XXX + proto_shadow = w_ProtoObjectClass.shadow + proto_shadow.store_w_superclass(w_Class) + # at this point, all classes that still lack a w_class are themselves + # metaclasses + for nm, w_cls_obj in self.classtable.items(): + if w_cls_obj.w_class is None: + w_cls_obj.w_class = w_Metaclass + + def define_cls(cls_nm, supercls_nm, instvarsize=0, format=shadow.POINTERS, + varsized=False): + assert cls_nm.startswith("w_") + meta_nm = cls_nm + "Class" + meta_super_nm = supercls_nm + "Class" + w_Metaclass = self.classtable["w_Metaclass"] + w_meta_cls = self.classtable[meta_nm] = \ + bootstrap_class(self, 0, # XXX + self.classtable[meta_super_nm], + w_Metaclass, + name=meta_nm[2:]) + w_cls = self.classtable[cls_nm] = \ + bootstrap_class(self, instvarsize, + self.classtable[supercls_nm], + w_meta_cls, + format=format, + varsized=varsized, + name=cls_nm[2:]) + + define_cls("w_Magnitude", "w_Object") + define_cls("w_Character", "w_Magnitude", instvarsize=1) + define_cls("w_Number", "w_Magnitude") + define_cls("w_Integer", "w_Number") + define_cls("w_SmallInteger", "w_Integer") + define_cls("w_LargePositiveInteger", "w_Integer", format=shadow.BYTES) + define_cls("w_Float", "w_Number", format=shadow.BYTES) + define_cls("w_Message", "w_Object") + define_cls("w_Collection", "w_Object") + define_cls("w_SequenceableCollection", "w_Collection") + define_cls("w_ArrayedCollection", "w_SequenceableCollection") + define_cls("w_Array", "w_ArrayedCollection", varsized=True) + define_cls("w_String", 
"w_ArrayedCollection", format=shadow.BYTES) + define_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) + define_cls("w_UndefinedObject", "w_Object") + define_cls("w_Boolean", "w_Object") + define_cls("w_True", "w_Boolean") + define_cls("w_False", "w_Boolean") + define_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES) + define_cls("w_MethodDict", "w_Object", instvarsize=2, varsized=True) + define_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD) + define_cls("w_ContextPart", "w_Object") + define_cls("w_MethodContext", "w_ContextPart") + define_cls("w_Link", "w_Object") + define_cls("w_Process", "w_Link") + define_cls("w_Point", "w_Object") + define_cls("w_LinkedList", "w_SequenceableCollection") + define_cls("w_Semaphore", "w_LinkedList") + define_cls("w_BlockContext", "w_ContextPart", + instvarsize=constants.BLKCTX_STACK_START) + define_cls("w_BlockClosure", "w_Object", + instvarsize=constants.BLKCLSR_SIZE, + varsized=True) + # make better accessors for classes that can be found in special object + # table + for name in constants.classes_in_special_object_table.keys(): + name = 'w_' + name + setattr(self, name, self.classtable.get(name)) + + def make_bootstrap_objects(self): + def bld_char(i): + w_cinst = self.w_Character.as_class_get_shadow(self).new() + w_cinst.store(self, constants.CHARACTER_VALUE_INDEX, + model.W_SmallInteger(i)) + return w_cinst + w_charactertable = model.W_PointersObject(self, + self.classtable['w_Array'], 256) + self.w_charactertable = w_charactertable + for i in range(256): + self.w_charactertable.atput0(self, i, bld_char(i)) + + + # Very special nil hack: in order to allow W_PointersObject's to + # initialize their fields to nil, we have to create it in the model + # package, and then patch up its fields here: + def patch_nil(w_nil): + w_nil.space = self + w_nil.s_class = self.classtable['w_UndefinedObject'].as_class_get_shadow(self) + w_nil.initialize_storage(self, 0) + return w_nil + w_nil = self.w_nil = patch_nil(model.w_nil) + + w_true = self.classtable['w_True'].as_class_get_shadow(self).new() + self.w_true = w_true + w_false = self.classtable['w_False'].as_class_get_shadow(self).new() + self.w_false = w_false + self.w_minus_one = model.W_SmallInteger(-1) + self.w_zero = model.W_SmallInteger(0) + self.w_one = model.W_SmallInteger(1) + self.w_two = model.W_SmallInteger(2) + w_special_selectors = model.W_PointersObject(self, + self.classtable['w_Array'], len(constants.SPECIAL_SELECTORS) * 2) + self.w_special_selectors = w_special_selectors + + self.objtable = {} + for name in constants.objects_in_special_object_table: + name = "w_" + name + try: + self.objtable[name] = locals()[name] + except KeyError, e: + self.objtable[name] = None + + +def bootstrap_class(space, instsize, w_superclass=None, w_metaclass=None, + name='?', format=shadow.POINTERS, varsized=False): + w_class = model.W_PointersObject(space, w_metaclass, 0) + s = instantiate(shadow.ClassShadow) + s.space = space + s.version = version.Version() + s._w_self = w_class + s.subclass_s = {} + s._s_superclass = None + s.store_w_superclass(w_superclass) + s.name = name + s._instance_size = instsize + s.instance_kind = format + s._s_methoddict = None + s.instance_varsized = varsized or format != shadow.POINTERS + w_class.store_shadow(s) + return w_class From noreply at buildbot.pypy.org Tue Mar 25 16:57:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 25 Mar 2014 16:57:13 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: merge 
default into branch Message-ID: <20140325155713.A8DCF1C3576@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70279:9359e47f4250 Date: 2014-03-25 17:47 +0200 http://bitbucket.org/pypy/pypy/changeset/9359e47f4250/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -117,3 +117,10 @@ .. branch: improve-consecutive-dict-lookups Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -150,14 +150,29 @@ this is used to define the filename attribute of the exception instance. Return value: always NULL.""" # XXX Doesn't actually do anything with PyErr_CheckSignals. + if llfilename: + w_filename = rffi.charp2str(llfilename) + filename = space.wrap(w_filename) + else: + filename = space.w_None + + PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename) + + at cpython_api([PyObject, PyObject], PyObject) +def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): + """Similar to PyErr_SetFromErrno(), with the additional behavior that if + w_value is not NULL, it is passed to the constructor of type as a + third parameter. In the case of exceptions such as IOError and OSError, + this is used to define the filename attribute of the exception instance. + Return value: always NULL.""" + # XXX Doesn't actually do anything with PyErr_CheckSignals. 
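
(The body of the new helper continues just below.) At application level, what both entry points aim to produce -- and what the tests added later in this patch assert -- is an OSError whose errno, strerror and filename come from the current errno plus the extra argument. A pure-Python restatement of that expected shape, reusing the same EBADF and "/path/to/file" values as the tests; it is an illustration, not code from the patch:

    import errno, os

    # The exception shape the cpyext tests below expect to see raised.
    exc = OSError(errno.EBADF, os.strerror(errno.EBADF), "/path/to/file")
    assert exc.errno == errno.EBADF
    assert exc.strerror == os.strerror(errno.EBADF)
    assert exc.filename == "/path/to/file"
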
errno = get_errno() msg = os.strerror(errno) - if llfilename: - w_filename = rffi.charp2str(llfilename) + if w_value: w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg), - space.wrap(w_filename)) + w_value) else: w_error = space.call_function(w_type, space.wrap(errno), diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -196,29 +196,136 @@ except OSError, e: assert e.errno == errno.EBADF assert e.strerror == os.strerror(errno.EBADF) - assert e.filename == None + assert e.filename is None def test_SetFromErrnoWithFilename(self): - import sys - if sys.platform != 'win32': - skip("callbacks through ll2ctypes modify errno") import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; - PyErr_SetFromErrnoWithFilename(PyExc_OSError, "blyf"); + PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/file"); return NULL; '''), ], prologue="#include ") - try: - module.set_from_errno() - except OSError, e: - assert e.filename == "blyf" - assert e.errno == errno.EBADF - assert e.strerror == os.strerror(errno.EBADF) + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilename_NULL(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename is None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyString(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *filenameObject = PyString_FromString("/path/to/file"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); + Py_DECREF(filenameObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyInt(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *intObject = PyInt_FromLong(3); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); + Py_DECREF(intObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == 3 + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyList(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); + Py_DECREF(lst); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == [1, 
2, "three"] + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyTuple(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); + Py_DECREF(tuple); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == (1, 2, "three") + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__Py_None(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *none = Py_BuildValue(""); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); + Py_DECREF(none); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename is None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_PyErr_Display(self): module = self.import_extension('foo', [ diff --git a/pypy/sandbox/test/test_pypy_interact.py b/pypy/sandbox/test/test_pypy_interact.py --- a/pypy/sandbox/test/test_pypy_interact.py +++ b/pypy/sandbox/test/test_pypy_interact.py @@ -1,4 +1,4 @@ -import os, sys, stat, errno +import os, stat, errno, py from pypy.sandbox.pypy_interact import PyPySandboxedProc from rpython.translator.interactive import Translation @@ -9,6 +9,9 @@ SITE_PY_CONTENT = LIB_PYTHON.join('site.py').read() ERROR_TEXT = os.strerror(errno.ENOENT) +if os.name == 'nt': + py.test.skip('sandbox not supported on windows') + def assert_(cond, text): if not cond: print "assert failed:", text diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -15,12 +15,12 @@ # # +--------------------+ <== aligned to 16 bytes # | return address | -# +--------------------+ -# | saved regs | -# +--------------------+ -# | scratch | -# | space | -# +--------------------+ <== aligned to 16 bytes +# +--------------------+ ----------------------. +# | saved regs | FRAME_FIXED_SIZE | +# +--------------------+ --------------------. | +# | scratch | PASS_ON_MY_FRAME | | +# | space | | | +# +--------------------+ <== aligned to 16 -----' ----' # All the rest of the data is in a GC-managed variable-size "frame". # This frame object's address is always stored in the register EBP/RBP. 
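
The next hunk updates the constants to match this picture. As a quick worked check of the numbers in the comment (not code from the patch): FRAME_FIXED_SIZE stays at 19 words on both word sizes, PASS_ON_MY_FRAME gains one word because the return address is no longer part of the sum, and the extra return-address word on top is presumably what keeps both layouts 16-byte aligned:

    # Worked check of the word counts in the frame-layout comment above.
    saved_regs_32 = 4                      # ebp, ebx, esi, edi
    saved_regs_64 = 6                      # rbp, rbx, r12, r13, r14, r15
    PASS_ON_MY_FRAME_32, PASS_ON_MY_FRAME_64 = 15, 13
    FRAME_FIXED_SIZE = 19
    assert saved_regs_32 + PASS_ON_MY_FRAME_32 == FRAME_FIXED_SIZE
    assert saved_regs_64 + PASS_ON_MY_FRAME_64 == FRAME_FIXED_SIZE
    # Return address + fixed part span a whole number of 16-byte units:
    assert (FRAME_FIXED_SIZE + 1) * 4 % 16 == 0   # 32-bit: 20 words of 4 bytes
    assert (FRAME_FIXED_SIZE + 1) * 8 % 16 == 0   # 64-bit: 20 words of 8 bytes
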
@@ -30,14 +30,14 @@ # start of every frame: the saved value of some registers if WORD == 4: - # ebp + ebx + esi + edi + 14 extra words + return address = 19 words + # ebp + ebx + esi + edi + 15 extra words = 19 words FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 14 + PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float else: - # rbp + rbx + r12 + r13 + r14 + r15 + 12 extra words + return address = 19 + # rbp + rbx + r12 + r13 + r14 + r15 + 13 extra words = 19 FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 12 + PASS_ON_MY_FRAME = 13 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 From noreply at buildbot.pypy.org Tue Mar 25 16:57:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 25 Mar 2014 16:57:14 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: document branch Message-ID: <20140325155714.EFE421C3576@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70280:378118928b11 Date: 2014-03-25 17:54 +0200 http://bitbucket.org/pypy/pypy/changeset/378118928b11/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -124,3 +124,6 @@ .. branch: refactor_PyErr_SetFromErrnoWithFilename Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 From noreply at buildbot.pypy.org Tue Mar 25 16:57:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 25 Mar 2014 16:57:16 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: close branch to be merged Message-ID: <20140325155716.2A9571C3576@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70281:3af64b2cdd03 Date: 2014-03-25 17:55 +0200 http://bitbucket.org/pypy/pypy/changeset/3af64b2cdd03/ Log: close branch to be merged From noreply at buildbot.pypy.org Tue Mar 25 16:57:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 25 Mar 2014 16:57:17 +0100 (CET) Subject: [pypy-commit] pypy default: merge win32-fixes4, which fixes some tests on win32 Message-ID: <20140325155717.C07AA1C3576@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70282:d2fa62bd86e8 Date: 2014-03-25 17:56 +0200 http://bitbucket.org/pypy/pypy/changeset/d2fa62bd86e8/ Log: merge win32-fixes4, which fixes some tests on win32 diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -231,9 +231,12 @@ unicwd = u'\xe7w\xf0' try: fsencoding = test_support.TESTFN_ENCODING or "ascii" - unicwd.encode(fsencoding) + asciival = unicwd.encode(fsencoding) + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): - # FS encoding is probably ASCII + # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass else: with test_support.temp_cwd(unicwd): diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -124,3 +124,6 @@ .. branch: refactor_PyErr_SetFromErrnoWithFilename Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. 
branch: win32-fixes4 +fix more tests for win32 diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -1,3 +1,5 @@ +import sys + class AppTestCodecs: spaceconfig = { "usemodules": ['unicodedata', 'struct', 'binascii'], @@ -137,7 +139,9 @@ class AppTestPartialEvaluation: - spaceconfig = dict(usemodules=('array',)) + spaceconfig = dict(usemodules=['array',]) + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_winreg') def test_partial_utf8(self): import _codecs @@ -694,8 +698,18 @@ import sys if sys.platform != 'win32': return + toencode = u'caf\xe9', 'caf\xe9' + try: + #test for non-latin1 codepage, more general test needed + import _winreg + key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + r'System\CurrentControlSet\Control\Nls\CodePage') + if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': #non-latin1 + toencode = u'caf\xbf','caf\xbf' + except: + assert False, 'cannot test mbcs on this windows system, check code page' assert u'test'.encode('mbcs') == 'test' - assert u'caf\xe9'.encode('mbcs') == 'caf\xe9' + assert toencode[0].encode('mbcs') == toencode[1] assert u'\u040a'.encode('mbcs') == '?' # some cyrillic letter assert 'cafx\e9'.decode('mbcs') == u'cafx\e9' diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -312,8 +312,9 @@ ("get_flags", "METH_NOARGS", """ PyCompilerFlags flags; + int result; flags.cf_flags = 0; - int result = PyEval_MergeCompilerFlags(&flags); + result = PyEval_MergeCompilerFlags(&flags); return Py_BuildValue("ii", result, flags.cf_flags); """), ]) diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -20,10 +20,11 @@ def detect_model_from_c_compiler(): # based on http://sourceforge.net/p/predef/wiki/Architectures/ + # and http://msdn.microsoft.com/en-us/library/b0084kay.aspx mapping = { - MODEL_X86_64: ['__amd64__', '__amd64', '__x86_64__', '__x86_64'], - MODEL_ARM: ['__arm__', '__thumb__'], - MODEL_X86: ['i386', '__i386', '__i386__', '__i686__'], + MODEL_X86_64: ['__amd64__', '__amd64', '__x86_64__', '__x86_64', '_M_X64', '_M_AMD64'], + MODEL_ARM: ['__arm__', '__thumb__','_M_ARM_EP'], + MODEL_X86: ['i386', '__i386', '__i386__', '__i686__','_M_IX86'], MODEL_PPC_64: ['__powerpc64__'], } for k, v in mapping.iteritems(): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -66,9 +66,11 @@ self.args = args class CallDescr(AbstractDescr): - def __init__(self, RESULT, ARGS, extrainfo): + from rpython.rlib.clibffi import FFI_DEFAULT_ABI + def __init__(self, RESULT, ARGS, extrainfo, ABI=FFI_DEFAULT_ABI): self.RESULT = RESULT self.ARGS = ARGS + self.ABI = ABI self.extrainfo = extrainfo def __repr__(self): @@ -428,7 +430,7 @@ try: return self.descrs[key] except KeyError: - descr = CallDescr(RESULT, ARGS, extrainfo) + descr = CallDescr(RESULT, ARGS, extrainfo, ABI=cif_description.abi) self.descrs[key] = descr return descr @@ -949,7 +951,7 @@ # graph, not to directly execute the python function result = self.cpu.maybe_on_top_of_llinterp(func, call_args, descr.RESULT) else: - FUNC = lltype.FuncType(descr.ARGS, descr.RESULT) + FUNC = lltype.FuncType(descr.ARGS, 
descr.RESULT, descr.ABI) func_to_call = rffi.cast(lltype.Ptr(FUNC), func) result = func_to_call(*call_args) del self.force_guard_op diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,6 +14,7 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float from rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo class BasicTests: @@ -3228,11 +3229,12 @@ self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): + eci = ExternalCompilationInfo() if sys.platform == "win32": - py.test.skip("needs 'time'") + eci = ExternalCompilationInfo(libraries=["msvcrt"]) T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, compilation_info=eci) # Not a real lock, has all the same properties with respect to GIL # release though, so good for this test. class Lock(object): @@ -3920,10 +3922,13 @@ self.interp_operations(f, []) def test_external_call(self): + eci = ExternalCompilationInfo() + if sys.platform == "win32": + eci = ExternalCompilationInfo(libraries=["msvcrt"]) from rpython.rlib.objectmodel import invoke_around_extcall T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T) + external = rffi.llexternal("time", [T], rffi.TIME_T, compilation_info=eci) class Oups(Exception): pass diff --git a/rpython/rlib/test/test_clibffi.py b/rpython/rlib/test/test_clibffi.py --- a/rpython/rlib/test/test_clibffi.py +++ b/rpython/rlib/test/test_clibffi.py @@ -423,11 +423,12 @@ def setup_class(cls): if sys.platform != 'win32': py.test.skip("Handle to libc library, Win-only test") - BaseFfiTest.setup_class(cls) + BaseFfiTest.setup_class() def test_get_libc_handle(self): handle = get_libc_handle() print get_libc_name() - print hex(handle) - assert handle != 0 - assert handle % 0x1000 == 0 + print dir(handle) + addr = rffi.cast(rffi.INT, handle) + assert addr != 0 + assert addr % 0x1000 == 0 diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -25,7 +25,7 @@ def as_unicode(self): return self.unistr -class BasePosixUnicode: +class BasePosixUnicodeOrAscii: def setup_method(self, method): self.ufilename = self._get_filename() try: @@ -34,9 +34,12 @@ py.test.skip("encoding not good enough") f.write("test") f.close() - - self.path = UnicodeWithEncoding(self.ufilename) - self.path2 = UnicodeWithEncoding(self.ufilename + ".new") + if sys.platform == 'win32' and isinstance(self.ufilename, str): + self.path = self.ufilename + self.path2 = self.ufilename + ".new" + else: + self.path = UnicodeWithEncoding(self.ufilename) + self.path2 = UnicodeWithEncoding(self.ufilename + ".new") def test_open(self): def f(): @@ -55,8 +58,11 @@ def test_stat(self): def f(): return rposix.stat(self.path).st_mtime - - assert interpret(f, []) == os.stat(self.ufilename).st_mtime + if sys.platform == 'win32': + #double vs. 
float, be satisfied with sub-millisec resolution + assert abs(interpret(f, []) - os.stat(self.ufilename).st_mtime) < 1e-4 + else: + assert interpret(f, []) == os.stat(self.ufilename).st_mtime def test_access(self): def f(): @@ -96,7 +102,11 @@ if sys.platform == 'win32': def f(): - return u', '.join(rposix.listdir(udir)) + if isinstance(udir.as_unicode(), str): + _udir = udir.as_unicode() + else: + _udir = udir + return u', '.join(rposix.listdir(_udir)) result = interpret(f, []) assert os.path.basename(self.ufilename) in ll_to_string(result) else: @@ -149,11 +159,11 @@ interpret(f, []) # does not crash -class TestPosixAscii(BasePosixUnicode): +class TestPosixAscii(BasePosixUnicodeOrAscii): def _get_filename(self): return str(udir.join('test_open_ascii')) -class TestPosixUnicode(BasePosixUnicode): +class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): return (unicode(udir.join('test_open')) + u'\u65e5\u672c.txt') # "Japan" diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -358,6 +358,13 @@ if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): + + functype = ctypes.CFUNCTYPE + if sys.platform == 'win32': + from rpython.rlib.clibffi import FFI_STDCALL, FFI_DEFAULT_ABI + if getattr(T.TO, 'ABI', FFI_DEFAULT_ABI) == FFI_STDCALL: + # for win32 system call + functype = ctypes.WINFUNCTYPE argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS if ARG is not lltype.Void] if T.TO.RESULT is lltype.Void: @@ -366,10 +373,10 @@ restype = get_ctypes_type(T.TO.RESULT) try: kwds = {'use_errno': True} - return ctypes.CFUNCTYPE(restype, *argtypes, **kwds) + return functype(restype, *argtypes, **kwds) except TypeError: # unexpected 'use_errno' argument, old ctypes version - return ctypes.CFUNCTYPE(restype, *argtypes) + return functype(restype, *argtypes) elif isinstance(T.TO, lltype.OpaqueType): return ctypes.c_void_p else: diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -537,7 +537,7 @@ class FuncType(ContainerType): _gckind = 'raw' __name__ = 'func' - def __init__(self, args, result): + def __init__(self, args, result, abi='FFI_DEFAULT_ABI'): for arg in args: assert isinstance(arg, LowLevelType) # There are external C functions eating raw structures, not @@ -547,6 +547,7 @@ if isinstance(result, ContainerType): raise TypeError, "function result can only be primitive or pointer" self.RESULT = result + self.ABI = abi def __str__(self): args = ', '.join(map(str, self.ARGS)) From noreply at buildbot.pypy.org Tue Mar 25 17:03:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 17:03:58 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: I think it's preferable to avoid rare conflicts even at the cost Message-ID: <20140325160358.2BF931C35BE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70283:18557316383a Date: 2014-03-25 17:03 +0100 http://bitbucket.org/pypy/pypy/changeset/18557316383a/ Log: I think it's preferable to avoid rare conflicts even at the cost of one additional check for zero-ness during strdict lookups. diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -377,17 +377,17 @@ # special non-computed-yet value. 
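
The changed lines of this hunk follow below. Leaving the stm_ignored handling aside (that is what the commit message is about), the scheme being preserved is a lazily computed hash cached on the string, with 0 reserved as the "not computed yet" marker and a fixed non-zero substitute (29872897 in the diff) used when the real hash happens to be 0. A plain-Python sketch of just that idea, with made-up class and helper names:

    # Lazy hash caching with 0 as the "not cached" sentinel (illustration only).
    class Str(object):
        def __init__(self, chars):
            self.chars = chars
            self.hash = 0                 # 0 means: not computed yet

    def _compute_strhash(s):
        x = hash(s.chars) & 0x7fffffff    # stand-in for _hash_string(s.chars)
        if x == 0:
            x = 29872897                  # same non-zero substitute as the diff
        s.hash = x                        # cache for later lookups
        return x

    def ll_strhash(s):
        x = s.hash
        if x == 0:                        # first use: compute and cache
            x = _compute_strhash(s)
        return x

    s = Str("abc")
    assert ll_strhash(s) == ll_strhash(s) != 0
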
if not s: return 0 - #with stm_ignored: - x = s.hash + with stm_ignored: + x = s.hash if x == 0: - x = _hash_string(s.chars) - if x == 0: - x = 29872897 - # XXX STM note: we would like this write to be stm-ignored, - # but we can't, because ll_strfasthash() might later miss - # the written value and return 0 again (rarely). Think - # again later about the best option. - #with stm_ignored: + x = LLHelpers._ll_compute_strhash(s) + return x + + def _ll_compute_strhash(s): + x = _hash_string(s.chars) + if x == 0: + x = 29872897 + with stm_ignored: s.hash = x return x @@ -395,7 +395,17 @@ return len(s.chars) def ll_strfasthash(s): - return s.hash # assumes that the hash is already computed + if rgc.stm_is_enabled(): + # due to "with stm_ignored" in _ll_strhash(), it is possible + # that just returning 's.hash' from here would rarely return + # the old value, which is 0. We need to check. + with stm_ignored: + x = s.hash + if x == 0: + x = LLHelpers._ll_compute_strhash(s) + return x + else: + return s.hash # assumes that the hash is already computed @jit.elidable def ll_strconcat(s1, s2): From noreply at buildbot.pypy.org Tue Mar 25 17:26:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Mar 2014 17:26:07 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140325162607.47AB31C30DC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70284:7950a956ce52 Date: 2014-03-25 12:24 -0400 http://bitbucket.org/pypy/pypy/changeset/7950a956ce52/ Log: cleanups diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -700,17 +700,17 @@ return toencode = u'caf\xe9', 'caf\xe9' try: - #test for non-latin1 codepage, more general test needed + # test for non-latin1 codepage, more general test needed import _winreg - key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'System\CurrentControlSet\Control\Nls\CodePage') - if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': #non-latin1 + if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': # non-latin1 toencode = u'caf\xbf','caf\xbf' except: assert False, 'cannot test mbcs on this windows system, check code page' assert u'test'.encode('mbcs') == 'test' assert toencode[0].encode('mbcs') == toencode[1] - assert u'\u040a'.encode('mbcs') == '?' # some cyrillic letter + assert u'\u040a'.encode('mbcs') == '?' 
# some cyrillic letter assert 'cafx\e9'.decode('mbcs') == u'cafx\e9' def test_bad_handler_string_result(self): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass, rstr +from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong from rpython.rlib.rtimer import read_timestamp @@ -66,7 +67,6 @@ self.args = args class CallDescr(AbstractDescr): - from rpython.rlib.clibffi import FFI_DEFAULT_ABI def __init__(self, RESULT, ARGS, extrainfo, ABI=FFI_DEFAULT_ABI): self.RESULT = RESULT self.ARGS = ARGS diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -59,9 +59,9 @@ def f(): return rposix.stat(self.path).st_mtime if sys.platform == 'win32': - #double vs. float, be satisfied with sub-millisec resolution + # double vs. float, be satisfied with sub-millisec resolution assert abs(interpret(f, []) - os.stat(self.ufilename).st_mtime) < 1e-4 - else: + else: assert interpret(f, []) == os.stat(self.ufilename).st_mtime def test_access(self): diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -358,7 +358,6 @@ if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): - functype = ctypes.CFUNCTYPE if sys.platform == 'win32': from rpython.rlib.clibffi import FFI_STDCALL, FFI_DEFAULT_ABI From noreply at buildbot.pypy.org Tue Mar 25 17:49:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 17:49:41 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fixed (I think): now accessing the raw data structure should not cause stm_become_inevitable, even Message-ID: <20140325164941.D44211C3576@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70285:b0f46f29bfa2 Date: 2014-03-25 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/b0f46f29bfa2/ Log: Fixed (I think): now accessing the raw data structure should not cause stm_become_inevitable, even though the structure has nested arrays diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -8,12 +8,15 @@ UINT32MAX = 2 ** 32 - 1 # keep in sync with the C code in pypy__decay_jit_counters -ENTRY = lltype.Struct('timetable_entry', - ('times', lltype.FixedSizeArray(rffi.FLOAT, 5)), - ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5)), - hints={'stm_dont_track_raw_accesses': True}) -ENTRY_ARRAY = lltype.Array(ENTRY, hints={'nolength': True, - 'stm_dont_track_raw_accesses': True}) +_h = {'stm_dont_track_raw_accesses': True} +ENTRY = lltype.Struct( + 'timetable_entry', + ('times', lltype.FixedSizeArray(rffi.FLOAT, 5, hints=_h)), + ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5, hints=_h)), + hints=_h) +ENTRY_ARRAY = lltype.Array( + ENTRY, + hints={'nolength': True, 'stm_dont_track_raw_accesses': True}) class JitCounter: From noreply at buildbot.pypy.org Tue Mar 25 17:49:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 17:49:43 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Tweaks Message-ID: 
<20140325164943.2EC331C357E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70286:b6601e2ca87d Date: 2014-03-25 17:46 +0100 http://bitbucket.org/pypy/pypy/changeset/b6601e2ca87d/ Log: Tweaks diff --git a/rpython/jit/backend/x86/stmtlocal.py b/rpython/jit/backend/x86/stmtlocal.py --- a/rpython/jit/backend/x86/stmtlocal.py +++ b/rpython/jit/backend/x86/stmtlocal.py @@ -23,4 +23,5 @@ 'pypy__threadlocal_base', [], lltype.Signed, compilation_info=eci, - _nowrapper=True) + _nowrapper=True, + transactionsafe=True) diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -125,7 +125,11 @@ try: info = op.args[0].value except IndexError: - info = "rstm.become_inevitable" # cannot insert it in 'llop' + info = "?" # cannot insert it in 'llop' + try: + info = '%s:%s' % funcgen.graph.name + except AttributeError: + pass string_literal = c_string_constant(info) return 'stm_become_inevitable(&stm_thread_local, %s);' % (string_literal,) From noreply at buildbot.pypy.org Tue Mar 25 17:49:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 17:49:44 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140325164944.5E97E1C357E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70287:e5c759af77bd Date: 2014-03-25 17:48 +0100 http://bitbucket.org/pypy/pypy/changeset/e5c759af77bd/ Log: merge heads diff --git a/rpython/jit/backend/x86/stmtlocal.py b/rpython/jit/backend/x86/stmtlocal.py --- a/rpython/jit/backend/x86/stmtlocal.py +++ b/rpython/jit/backend/x86/stmtlocal.py @@ -23,4 +23,5 @@ 'pypy__threadlocal_base', [], lltype.Signed, compilation_info=eci, - _nowrapper=True) + _nowrapper=True, + transactionsafe=True) diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -8,12 +8,15 @@ UINT32MAX = 2 ** 32 - 1 # keep in sync with the C code in pypy__decay_jit_counters -ENTRY = lltype.Struct('timetable_entry', - ('times', lltype.FixedSizeArray(rffi.FLOAT, 5)), - ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5)), - hints={'stm_dont_track_raw_accesses': True}) -ENTRY_ARRAY = lltype.Array(ENTRY, hints={'nolength': True, - 'stm_dont_track_raw_accesses': True}) +_h = {'stm_dont_track_raw_accesses': True} +ENTRY = lltype.Struct( + 'timetable_entry', + ('times', lltype.FixedSizeArray(rffi.FLOAT, 5, hints=_h)), + ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5, hints=_h)), + hints=_h) +ENTRY_ARRAY = lltype.Array( + ENTRY, + hints={'nolength': True, 'stm_dont_track_raw_accesses': True}) class JitCounter: diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -125,7 +125,11 @@ try: info = op.args[0].value except IndexError: - info = "rstm.become_inevitable" # cannot insert it in 'llop' + info = "?" 
# cannot insert it in 'llop' + try: + info = '%s:%s' % funcgen.graph.name + except AttributeError: + pass string_literal = c_string_constant(info) return 'stm_become_inevitable(&stm_thread_local, %s);' % (string_literal,) From noreply at buildbot.pypy.org Tue Mar 25 17:50:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Mar 2014 17:50:54 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: oups Message-ID: <20140325165054.BD70A1C35BE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70288:4d60b78acf5c Date: 2014-03-25 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/4d60b78acf5c/ Log: oups diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -127,7 +127,7 @@ except IndexError: info = "?" # cannot insert it in 'llop' try: - info = '%s:%s' % funcgen.graph.name + info = '%s:%s' % (funcgen.graph.name, info) except AttributeError: pass string_literal = c_string_constant(info) From noreply at buildbot.pypy.org Tue Mar 25 18:59:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Mar 2014 18:59:10 +0100 (CET) Subject: [pypy-commit] pypy default: fix ndarray setitem with empty index (issue1719) Message-ID: <20140325175910.E3A731C309E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70289:26e0a68bb6dd Date: 2014-03-25 13:51 -0400 http://bitbucket.org/pypy/pypy/changeset/26e0a68bb6dd/ Log: fix ndarray setitem with empty index (issue1719) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -185,7 +185,7 @@ return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), - self.get_order(), w_instance=self) + self.get_order(), w_instance=self) if not w_res.get_size(): return w_res return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, @@ -201,6 +201,8 @@ view = chunks.apply(space, self) view.implementation.setslice(space, val_arr) return + if support.product(iter_shape) == 0: + return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, prefix) @@ -1169,7 +1171,7 @@ raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) totalsize = support.product(shape) * dtype.elsize - if totalsize+offset > buf.getlength(): + if totalsize + offset > buf.getlength(): raise OperationError(space.w_TypeError, space.wrap( "buffer is too small for requested array")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2369,6 +2369,19 @@ assert b.shape == b[...].shape assert (b == b[...]).all() + def test_empty_indexing(self): + import numpy as np + r = np.ones(3) + ind = np.array([], np.int32) + tmp = np.array([], np.float64) + assert r[ind].shape == (0,) + r[ind] = 0 + assert (r == np.ones(3)).all() + r[ind] = tmp + assert (r == np.ones(3)).all() + r[[]] = 0 + assert (r == np.ones(3)).all() + class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) From noreply at buildbot.pypy.org Tue Mar 25 22:20:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Mar 2014 22:20:32 +0100 (CET) Subject: 
[pypy-commit] pypy default: only apply this workaround for mbcs Message-ID: <20140325212032.2112C1C309D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70290:ba471437d878 Date: 2014-03-25 17:19 -0400 http://bitbucket.org/pypy/pypy/changeset/ba471437d878/ Log: only apply this workaround for mbcs diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -232,9 +232,11 @@ try: fsencoding = test_support.TESTFN_ENCODING or "ascii" asciival = unicwd.encode(fsencoding) - v = asciival.find('?') - if v >= 0: - raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) + if fsencoding == "mbcs": + # http://bugs.python.org/issue850997 + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass From noreply at buildbot.pypy.org Wed Mar 26 00:56:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 26 Mar 2014 00:56:02 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix for 32bit platforms Message-ID: <20140325235602.69A9D1D23C3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70291:aef46bd9f030 Date: 2014-03-25 16:38 -0700 http://bitbucket.org/pypy/pypy/changeset/aef46bd9f030/ Log: fix for 32bit platforms diff --git a/pypy/objspace/std/test/test_smalllongobject.py b/pypy/objspace/std/test/test_smalllongobject.py --- a/pypy/objspace/std/test/test_smalllongobject.py +++ b/pypy/objspace/std/test/test_smalllongobject.py @@ -51,7 +51,7 @@ from pypy.interpreter import gateway from pypy.objspace.std.smalllongobject import W_SmallLongObject def w__long(space, w_obj): - return W_SmallLongObject.fromint(space.int_w(w_obj)) + return W_SmallLongObject.frombigint(space.bigint_w(w_obj)) cls.w__long = cls.space.wrap(gateway.interp2app(w__long)) def test_sl_simple(self): From noreply at buildbot.pypy.org Wed Mar 26 00:56:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 26 Mar 2014 00:56:04 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140325235604.8E96F1D23C3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70292:e1951a6821db Date: 2014-03-25 16:53 -0700 http://bitbucket.org/pypy/pypy/changeset/e1951a6821db/ Log: merge default diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -231,9 +231,14 @@ unicwd = u'\xe7w\xf0' try: fsencoding = test_support.TESTFN_ENCODING or "ascii" - unicwd.encode(fsencoding) + asciival = unicwd.encode(fsencoding) + if fsencoding == "mbcs": + # http://bugs.python.org/issue850997 + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): - # FS encoding is probably ASCII + # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass else: with test_support.temp_cwd(unicwd): diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -117,3 +117,13 @@ .. branch: improve-consecutive-dict-lookups Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. 
branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. branch: win32-fixes4 +fix more tests for win32 diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -1,3 +1,5 @@ +import sys + class AppTestCodecs: spaceconfig = { "usemodules": ['unicodedata', 'struct', 'binascii'], @@ -138,7 +140,9 @@ class AppTestPartialEvaluation: - spaceconfig = dict(usemodules=('array',)) + spaceconfig = dict(usemodules=['array',]) + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_winreg') def test_partial_utf8(self): import _codecs @@ -753,9 +757,25 @@ import sys if sys.platform != 'win32': return - assert 'test'.encode('mbcs') == b'test' - assert 'caf\xe9'.encode('mbcs') == b'caf\xe9' - raises(UnicodeEncodeError, '\u040a'.encode, 'mbcs') - raises(UnicodeEncodeError, - "-\u5171\u0141\u2661\u0363\uDC80".encode, 'mbcs') - assert b'cafx\e9'.decode('mbcs') == 'cafx\e9' + toencode = u'caf\xe9', b'caf\xe9' + try: + # test for non-latin1 codepage, more general test needed + import _winreg + key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + r'System\CurrentControlSet\Control\Nls\CodePage') + if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': # non-latin1 + toencode = u'caf\xbf',b'caf\xbf' + except: + assert False, 'cannot test mbcs on this windows system, check code page' + assert u'test'.encode('mbcs') == b'test' + assert toencode[0].encode('mbcs') == toencode[1] + assert u'\u040a'.encode('mbcs') == b'?' # some cyrillic letter + assert b'cafx\e9'.decode('mbcs') == u'cafx\e9' + + def test_bad_handler_string_result(self): + import _codecs + def f(exc): + return (b'foo', exc.end) + _codecs.register_error("test.test_codecs_not_a_string", f) + raises(TypeError, u'\u1234'.encode, 'ascii', + 'test.test_codecs_not_a_string') diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -146,14 +146,29 @@ this is used to define the filename attribute of the exception instance. Return value: always NULL.""" # XXX Doesn't actually do anything with PyErr_CheckSignals. + if llfilename: + w_filename = rffi.charp2str(llfilename) + filename = space.wrap(w_filename) + else: + filename = space.w_None + + PyErr_SetFromErrnoWithFilenameObject(space, w_type, filename) + + at cpython_api([PyObject, PyObject], PyObject) +def PyErr_SetFromErrnoWithFilenameObject(space, w_type, w_value): + """Similar to PyErr_SetFromErrno(), with the additional behavior that if + w_value is not NULL, it is passed to the constructor of type as a + third parameter. In the case of exceptions such as IOError and OSError, + this is used to define the filename attribute of the exception instance. + Return value: always NULL.""" + # XXX Doesn't actually do anything with PyErr_CheckSignals. 
errno = get_errno() msg = os.strerror(errno) - if llfilename: - w_filename = rffi.charp2str(llfilename) + if w_value: w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg), - space.wrap(w_filename)) + w_value) else: w_error = space.call_function(w_type, space.wrap(errno), diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -312,8 +312,9 @@ ("get_flags", "METH_NOARGS", """ PyCompilerFlags flags; + int result; flags.cf_flags = 0; - int result = PyEval_MergeCompilerFlags(&flags); + result = PyEval_MergeCompilerFlags(&flags); return Py_BuildValue("ii", result, flags.cf_flags); """), ]) diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -186,29 +186,136 @@ except OSError as e: assert e.errno == errno.EBADF assert e.strerror == os.strerror(errno.EBADF) - assert e.filename == None + assert e.filename is None def test_SetFromErrnoWithFilename(self): - import sys - if sys.platform != 'win32': - skip("callbacks through ll2ctypes modify errno") import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; - PyErr_SetFromErrnoWithFilename(PyExc_OSError, "blyf"); + PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/file"); return NULL; '''), ], prologue="#include ") - try: - module.set_from_errno() - except OSError as e: - assert e.filename == "blyf" - assert e.errno == errno.EBADF - assert e.strerror == os.strerror(errno.EBADF) + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilename_NULL(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename is None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyString(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *filenameObject = PyString_FromString("/path/to/file"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); + Py_DECREF(filenameObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == "/path/to/file" + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyInt(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *intObject = PyInt_FromLong(3); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); + Py_DECREF(intObject); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == 3 + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def 
test_SetFromErrnoWithFilenameObject__PyList(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); + Py_DECREF(lst); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == [1, 2, "three"] + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__PyTuple(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); + Py_DECREF(tuple); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename == (1, 2, "three") + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) + + def test_SetFromErrnoWithFilenameObject__Py_None(self): + import errno, os + + module = self.import_extension('foo', [ + ("set_from_errno", "METH_NOARGS", + ''' + errno = EBADF; + PyObject *none = Py_BuildValue(""); + PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); + Py_DECREF(none); + return NULL; + '''), + ], + prologue="#include ") + exc_info = raises(OSError, module.set_from_errno) + assert exc_info.value.filename is None + assert exc_info.value.errno == errno.EBADF + assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_PyErr_Display(self): module = self.import_extension('foo', [ diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -185,7 +185,7 @@ return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), - self.get_order(), w_instance=self) + self.get_order(), w_instance=self) if not w_res.get_size(): return w_res return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, @@ -201,6 +201,8 @@ view = chunks.apply(space, self) view.implementation.setslice(space, val_arr) return + if support.product(iter_shape) == 0: + return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, prefix) @@ -1159,7 +1161,7 @@ raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) totalsize = support.product(shape) * dtype.elsize - if totalsize+offset > buf.getlength(): + if totalsize + offset > buf.getlength(): raise OperationError(space.w_TypeError, space.wrap( "buffer is too small for requested array")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2360,6 +2360,19 @@ assert b.shape == b[...].shape assert (b == b[...]).all() + def test_empty_indexing(self): + import numpy as np + r = np.ones(3) + ind = np.array([], np.int32) + tmp = np.array([], np.float64) + assert r[ind].shape == (0,) + r[ind] = 0 + assert (r == np.ones(3)).all() + r[ind] = tmp + assert (r == np.ones(3)).all() + r[[]] = 0 + assert (r == np.ones(3)).all() + class 
AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -70,14 +70,14 @@ p13 = new(descr=...) p15 = new_array(8, descr=) setfield_gc(p13, p15, descr=) - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) setfield_gc(p13, 16, descr=) guard_no_exception(descr=...) p20 = new_with_vtable(ConstClass(W_IntObject)) call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) + i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, descr=) guard_no_exception(descr=...) i26 = int_and(i23, .*) i27 = int_is_true(i26) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -141,15 +141,16 @@ i = 0 b = B(1) while i < 100: - b.x - v = b.x # ID: loadattr + v = b.x # ID: loadattr1 + v = b.x # ID: loadattr2 i += v return i log = self.run(main, [], threshold=80) loop, = log.loops_by_filename(self.filepath) - assert loop.match_by_id('loadattr', + assert loop.match_by_id('loadattr1', ''' + guard_not_invalidated(descr=...) i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...) guard_no_exception(descr=...) i21 = int_and(i19, _) @@ -161,6 +162,7 @@ i29 = int_is_true(i28) guard_true(i29, descr=...) 
''') + assert loop.match_by_id('loadattr2', "") # completely folded away def test_python_contains(self): def main(): diff --git a/pypy/sandbox/test/test_pypy_interact.py b/pypy/sandbox/test/test_pypy_interact.py --- a/pypy/sandbox/test/test_pypy_interact.py +++ b/pypy/sandbox/test/test_pypy_interact.py @@ -1,4 +1,4 @@ -import os, sys, stat, errno +import os, stat, errno, py from pypy.sandbox.pypy_interact import PyPySandboxedProc from rpython.translator.interactive import Translation @@ -9,6 +9,9 @@ SITE_PY_CONTENT = LIB_PYTHON.join('site.py').read() ERROR_TEXT = os.strerror(errno.ENOENT) +if os.name == 'nt': + py.test.skip('sandbox not supported on windows') + def assert_(cond, text): if not cond: print "assert failed:", text diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -584,7 +584,10 @@ emit_op_getfield_gc_pure = emit_op_getfield_gc def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): - # XXX implement me + base_loc, value_loc = arglocs + self.mc.LDR_ri(value_loc.value, base_loc.value, 0, cond=fcond) + self.mc.ADD_ri(value_loc.value, value_loc.value, 1, cond=fcond) + self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -850,8 +850,12 @@ prepare_op_getfield_gc_pure = prepare_op_getfield_gc def prepare_op_increment_debug_counter(self, op, fcond): - # XXX implement me - return [] + boxes = op.getarglist() + a0, = boxes + base_loc = self.make_sure_var_in_reg(a0, boxes) + value_loc = self.get_scratch_reg(INT, boxes) + self.free_temp_vars() + return [base_loc, value_loc] def prepare_op_getinteriorfield_gc(self, op, fcond): t = unpack_interiorfielddescr(op.getdescr()) diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -20,10 +20,11 @@ def detect_model_from_c_compiler(): # based on http://sourceforge.net/p/predef/wiki/Architectures/ + # and http://msdn.microsoft.com/en-us/library/b0084kay.aspx mapping = { - MODEL_X86_64: ['__amd64__', '__amd64', '__x86_64__', '__x86_64'], - MODEL_ARM: ['__arm__', '__thumb__'], - MODEL_X86: ['i386', '__i386', '__i386__', '__i686__'], + MODEL_X86_64: ['__amd64__', '__amd64', '__x86_64__', '__x86_64', '_M_X64', '_M_AMD64'], + MODEL_ARM: ['__arm__', '__thumb__','_M_ARM_EP'], + MODEL_X86: ['i386', '__i386', '__i386__', '__i686__','_M_IX86'], MODEL_PPC_64: ['__powerpc64__'], } for k, v in mapping.iteritems(): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass, rstr +from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong from rpython.rlib.rtimer import read_timestamp @@ -66,9 +67,10 @@ self.args = args class CallDescr(AbstractDescr): - def __init__(self, RESULT, ARGS, extrainfo): + def __init__(self, RESULT, ARGS, extrainfo, ABI=FFI_DEFAULT_ABI): self.RESULT = RESULT self.ARGS = ARGS + self.ABI = ABI 
self.extrainfo = extrainfo def __repr__(self): @@ -428,7 +430,7 @@ try: return self.descrs[key] except KeyError: - descr = CallDescr(RESULT, ARGS, extrainfo) + descr = CallDescr(RESULT, ARGS, extrainfo, ABI=cif_description.abi) self.descrs[key] = descr return descr @@ -949,7 +951,7 @@ # graph, not to directly execute the python function result = self.cpu.maybe_on_top_of_llinterp(func, call_args, descr.RESULT) else: - FUNC = lltype.FuncType(descr.ARGS, descr.RESULT) + FUNC = lltype.FuncType(descr.ARGS, descr.RESULT, descr.ABI) func_to_call = rffi.cast(lltype.Ptr(FUNC), func) result = func_to_call(*call_args) del self.force_guard_op diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -545,7 +545,7 @@ p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) - p2 = call_malloc_nursery_varsize(2, 4, i2, \ + p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) setfield_gc(p2, i2, descr=unicodelendescr) p3 = call_malloc_nursery_varsize(1, 1, i2, \ diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -15,12 +15,12 @@ # # +--------------------+ <== aligned to 16 bytes # | return address | -# +--------------------+ -# | saved regs | -# +--------------------+ -# | scratch | -# | space | -# +--------------------+ <== aligned to 16 bytes +# +--------------------+ ----------------------. +# | saved regs | FRAME_FIXED_SIZE | +# +--------------------+ --------------------. | +# | scratch | PASS_ON_MY_FRAME | | +# | space | | | +# +--------------------+ <== aligned to 16 -----' ----' # All the rest of the data is in a GC-managed variable-size "frame". # This frame object's address is always stored in the register EBP/RBP. 
@@ -30,14 +30,14 @@ # start of every frame: the saved value of some registers if WORD == 4: - # ebp + ebx + esi + edi + 14 extra words + return address = 19 words + # ebp + ebx + esi + edi + 15 extra words = 19 words FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 14 + PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float else: - # rbp + rbx + r12 + r13 + r14 + r15 + 12 extra words + return address = 19 + # rbp + rbx + r12 + r13 + r14 + r15 + 13 extra words = 19 FRAME_FIXED_SIZE = 19 - PASS_ON_MY_FRAME = 12 + PASS_ON_MY_FRAME = 13 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -103,8 +103,10 @@ extradescrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), + frozenset_or_none(readonly_descrs_interiorfields), frozenset_or_none(write_descrs_fields), frozenset_or_none(write_descrs_arrays), + frozenset_or_none(write_descrs_interiorfields), extraeffect, oopspecindex, can_invalidate) @@ -222,6 +224,18 @@ descr = cpu.interiorfielddescrof(T, fieldname) descrs_interiorfields.append(descr) + # a read or a write to an interiorfield, inside an array of + # structs, is additionally recorded as a read or write of + # the array itself + extraef = set() + for tup in effects: + if tup[0] == "interiorfield" or tup[0] == "readinteriorfield": + T = deref(tup[1]) + if isinstance(T, lltype.Array) and consider_array(T): + extraef.add((tup[0].replace("interiorfield", "array"), + tup[1])) + effects |= extraef + for tup in effects: if tup[0] == "struct": add_struct(write_descrs_fields, tup) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1854,8 +1854,7 @@ def _handle_dict_lookup_call(self, op, oopspec_name, args): extradescr1 = self.cpu.fielddescrof(op.args[1].concretetype.TO, 'entries') - extradescr2 = self.cpu.interiorfielddescrof( - op.args[1].concretetype.TO.entries.TO, 'key') + extradescr2 = self.cpu.arraydescrof(op.args[1].concretetype.TO.entries.TO) return self._handle_oopspec_call(op, args, EffectInfo.OS_DICT_LOOKUP, extradescr=[extradescr1, extradescr2]) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -73,7 +73,7 @@ def guess_call_kind(self, op): return 'residual' def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescr=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): diff --git a/rpython/jit/codewriter/test/test_longlong.py b/rpython/jit/codewriter/test/test_longlong.py --- a/rpython/jit/codewriter/test/test_longlong.py +++ b/rpython/jit/codewriter/test/test_longlong.py @@ -17,7 +17,7 @@ class FakeBuiltinCallControl: def guess_call_kind(self, op): return 'builtin' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, extradescr=None): assert oopspecindex is not None # in this test return 'calldescr-%d' % oopspecindex def calldescr_canraise(self, calldescr): diff --git a/rpython/jit/metainterp/history.py 
b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -108,6 +108,7 @@ raise NotImplementedError def getaddr(self): + "Only for raw addresses (BoxInt & ConstInt), not for GC addresses" raise NotImplementedError def sort_key(self): @@ -321,9 +322,6 @@ else: return 0 - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def same_constant(self, other): if isinstance(other, ConstPtr): return self.value == other.value @@ -494,9 +492,6 @@ return lltype.cast_opaque_ptr(PTR, self.getref_base()) getref._annspecialcase_ = 'specialize:arg(1)' - def getaddr(self): - return llmemory.cast_ptr_to_adr(self.value) - def _get_hash_(self): if self.value: return lltype.identityhash(self.value) diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -72,6 +72,9 @@ def _make_log_operations(self): return LogOperations(self.metainterp_sd, self.guard_number) + def repr_of_resop(self, op): + return LogOperations(self.metainterp_sd, self.guard_number).repr_of_resop(op) + class LogOperations(object): """ diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -177,7 +177,7 @@ self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} - # cache of corresponding array descrs + # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # self._lazy_setfields_and_arrayitems = [] @@ -346,9 +346,8 @@ self.force_lazy_setfield(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) - for descr in effectinfo.write_descrs_interiorfields: - if descr in self.corresponding_array_descrs: - dictdescr = self.corresponding_array_descrs.pop(descr) + if arraydescr in self.corresponding_array_descrs: + dictdescr = self.corresponding_array_descrs.pop(arraydescr) try: del self.cached_dict_reads[dictdescr] except KeyError: diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -242,8 +242,9 @@ box = value.box assert isinstance(box, Const) if not box.same_constant(constbox): - raise InvalidLoop('A GUARD_{VALUE,TRUE,FALSE} was proven to' + - 'always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_{VALUE,TRUE,FALSE} (%s) was proven ' + 'to always fail' % r) return if emit_operation: self.emit_operation(op) @@ -255,7 +256,9 @@ if value.is_null(): return elif value.is_nonnull(): - raise InvalidLoop('A GUARD_ISNULL was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always fail' + % r) self.emit_operation(op) value.make_constant(self.optimizer.cpu.ts.CONST_NULL) @@ -264,7 +267,9 @@ if value.is_nonnull(): return elif value.is_null(): - raise InvalidLoop('A GUARD_NONNULL was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always fail' + % r) self.emit_operation(op) value.make_nonnull(op) @@ -292,7 +297,8 @@ assert previous_classbox is not 
None assert expected_classbox is not None if not previous_classbox.same_constant(expected_classbox): - raise InvalidLoop('A GUARD_VALUE was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_VALUE (%s) was proven to always fail' % r) op = old_guard_op.copy_and_change(rop.GUARD_VALUE, args = [old_guard_op.getarg(0), op.getarg(1)]) self.optimizer.replaces_guard[op] = old_guard_op @@ -333,7 +339,9 @@ if realclassbox is not None: if realclassbox.same_constant(expectedclassbox): return - raise InvalidLoop('A GUARD_CLASS was proven to always fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_CLASS (%s) was proven to always fail' + % r) if value.last_guard: # there already has been a guard_nonnull or guard_class or # guard_nonnull_class on this value. @@ -356,8 +364,9 @@ def optimize_GUARD_NONNULL_CLASS(self, op): value = self.getvalue(op.getarg(0)) if value.is_null(): - raise InvalidLoop('A GUARD_NONNULL_CLASS was proven to always ' + - 'fail') + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + raise InvalidLoop('A GUARD_NONNULL_CLASS (%s) was proven to ' + 'always fail' % r) self.optimize_GUARD_CLASS(op) def optimize_CALL_LOOPINVARIANT(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -319,6 +319,9 @@ def log_loop(*args): pass + class logger_ops: + repr_of_resop = repr + class warmrunnerdesc: class memory_manager: retrace_limit = 5 diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -139,7 +139,13 @@ txt1 = str(op1) txt2 = str(op2) while txt1 or txt2: - print '%s| %s' % (txt1[:width].ljust(width), txt2[:width]) + part1 = txt1[:width] + part2 = txt2[:width] + if part1 == part2: + sep = '| ' + else: + sep = '<>' + print '%s%s%s' % (part1.ljust(width), sep, part2) txt1 = txt1[width:] txt2 = txt2[width:] print '-' * totwidth diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,6 +14,7 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float from rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo class BasicTests: @@ -3228,11 +3229,12 @@ self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): + eci = ExternalCompilationInfo() if sys.platform == "win32": - py.test.skip("needs 'time'") + eci = ExternalCompilationInfo(libraries=["msvcrt"]) T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, compilation_info=eci) # Not a real lock, has all the same properties with respect to GIL # release though, so good for this test. 
class Lock(object): @@ -3920,10 +3922,13 @@ self.interp_operations(f, []) def test_external_call(self): + eci = ExternalCompilationInfo() + if sys.platform == "win32": + eci = ExternalCompilationInfo(libraries=["msvcrt"]) from rpython.rlib.objectmodel import invoke_around_extcall T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T) + external = rffi.llexternal("time", [T], rffi.TIME_T, compilation_info=eci) class Oups(Exception): pass diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -294,6 +294,54 @@ assert res == f(10) self.check_simple_loop(call=3) + def test_dict_eq_can_release_gil(self): + from rpython.rtyper.lltypesystem import lltype, rffi + if type(self.newdict()) is not dict: + py.test.skip("this is an r_dict test") + T = rffi.CArrayPtr(rffi.TIME_T) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) + myjitdriver = JitDriver(greens = [], reds = ['total', 'dct']) + def key(x): + return x % 2 + def eq(x, y): + external(lltype.nullptr(T.TO)) + return (x % 2) == (y % 2) + + def f(n): + dct = objectmodel.r_dict(eq, key) + total = n + x = 44444 + y = 55555 + z = 66666 + while total: + myjitdriver.jit_merge_point(total=total, dct=dct) + dct[total] = total + x = dct[total] + y = dct[total] + z = dct[total] + total -= 1 + return len(dct) + x + y + z + + res = self.meta_interp(f, [10], listops=True) + assert res == 2 + 1 + 1 + 1 + self.check_simple_loop(call_may_force=4, # ll_dict_lookup_trampoline + call=1) # ll_dict_setitem_lookup_done_trampoline + + def test_bug42(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + def f(n): + mdict = {0: None, 1: None, 2: None, 3: None, 4: None, + 5: None, 6: None, 7: None, 8: None, 9: None} + while n > 0: + myjitdriver.jit_merge_point() + n -= 1 + if n in mdict: + del mdict[n] + if n in mdict: + raise Exception + self.meta_interp(f, [10]) + self.check_simple_loop(call_may_force=0, call=3) + class TestLLtype(DictTests, LLJitMixin): pass diff --git a/rpython/rlib/test/test_clibffi.py b/rpython/rlib/test/test_clibffi.py --- a/rpython/rlib/test/test_clibffi.py +++ b/rpython/rlib/test/test_clibffi.py @@ -423,11 +423,12 @@ def setup_class(cls): if sys.platform != 'win32': py.test.skip("Handle to libc library, Win-only test") - BaseFfiTest.setup_class(cls) + BaseFfiTest.setup_class() def test_get_libc_handle(self): handle = get_libc_handle() print get_libc_name() - print hex(handle) - assert handle != 0 - assert handle % 0x1000 == 0 + print dir(handle) + addr = rffi.cast(rffi.INT, handle) + assert addr != 0 + assert addr % 0x1000 == 0 diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -25,7 +25,7 @@ def as_unicode(self): return self.unistr -class BasePosixUnicode: +class BasePosixUnicodeOrAscii: def setup_method(self, method): self.ufilename = self._get_filename() try: @@ -34,9 +34,12 @@ py.test.skip("encoding not good enough") f.write("test") f.close() - - self.path = UnicodeWithEncoding(self.ufilename) - self.path2 = UnicodeWithEncoding(self.ufilename + ".new") + if sys.platform == 'win32' and isinstance(self.ufilename, str): + self.path = self.ufilename + self.path2 = self.ufilename + ".new" + else: + self.path = UnicodeWithEncoding(self.ufilename) + self.path2 = UnicodeWithEncoding(self.ufilename + ".new") def test_open(self): def f(): @@ 
-55,8 +58,11 @@ def test_stat(self): def f(): return rposix.stat(self.path).st_mtime - - assert interpret(f, []) == os.stat(self.ufilename).st_mtime + if sys.platform == 'win32': + # double vs. float, be satisfied with sub-millisec resolution + assert abs(interpret(f, []) - os.stat(self.ufilename).st_mtime) < 1e-4 + else: + assert interpret(f, []) == os.stat(self.ufilename).st_mtime def test_access(self): def f(): @@ -96,7 +102,11 @@ if sys.platform == 'win32': def f(): - return u', '.join(rposix.listdir(udir)) + if isinstance(udir.as_unicode(), str): + _udir = udir.as_unicode() + else: + _udir = udir + return u', '.join(rposix.listdir(_udir)) result = interpret(f, []) assert os.path.basename(self.ufilename) in ll_to_string(result) else: @@ -149,11 +159,11 @@ interpret(f, []) # does not crash -class TestPosixAscii(BasePosixUnicode): +class TestPosixAscii(BasePosixUnicodeOrAscii): def _get_filename(self): return str(udir.join('test_open_ascii')) -class TestPosixUnicode(BasePosixUnicode): +class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): return (unicode(udir.join('test_open')) + u'\u65e5\u672c.txt') # "Japan" diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -358,6 +358,12 @@ if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): + functype = ctypes.CFUNCTYPE + if sys.platform == 'win32': + from rpython.rlib.clibffi import FFI_STDCALL, FFI_DEFAULT_ABI + if getattr(T.TO, 'ABI', FFI_DEFAULT_ABI) == FFI_STDCALL: + # for win32 system call + functype = ctypes.WINFUNCTYPE argtypes = [get_ctypes_type(ARG) for ARG in T.TO.ARGS if ARG is not lltype.Void] if T.TO.RESULT is lltype.Void: @@ -366,10 +372,10 @@ restype = get_ctypes_type(T.TO.RESULT) try: kwds = {'use_errno': True} - return ctypes.CFUNCTYPE(restype, *argtypes, **kwds) + return functype(restype, *argtypes, **kwds) except TypeError: # unexpected 'use_errno' argument, old ctypes version - return ctypes.CFUNCTYPE(restype, *argtypes) + return functype(restype, *argtypes) elif isinstance(T.TO, lltype.OpaqueType): return ctypes.c_void_p else: diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -537,7 +537,7 @@ class FuncType(ContainerType): _gckind = 'raw' __name__ = 'func' - def __init__(self, args, result): + def __init__(self, args, result, abi='FFI_DEFAULT_ABI'): for arg in args: assert isinstance(arg, LowLevelType) # There are external C functions eating raw structures, not @@ -547,6 +547,7 @@ if isinstance(result, ContainerType): raise TypeError, "function result can only be primitive or pointer" self.RESULT = result + self.ABI = abi def __str__(self): args = ', '.join(map(str, self.ARGS)) From noreply at buildbot.pypy.org Wed Mar 26 00:56:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 26 Mar 2014 00:56:05 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3, which allows bytes or unicode here Message-ID: <20140325235605.D980E1D23C3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70293:1e0b20cb98d7 Date: 2014-03-25 16:55 -0700 http://bitbucket.org/pypy/pypy/changeset/1e0b20cb98d7/ Log: adapt to py3, which allows bytes or unicode here diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ 
b/pypy/module/_codecs/test/test_codecs.py @@ -772,10 +772,10 @@ assert u'\u040a'.encode('mbcs') == b'?' # some cyrillic letter assert b'cafx\e9'.decode('mbcs') == u'cafx\e9' - def test_bad_handler_string_result(self): + def test_handler_string_result(self): import _codecs def f(exc): return (b'foo', exc.end) _codecs.register_error("test.test_codecs_not_a_string", f) - raises(TypeError, u'\u1234'.encode, 'ascii', - 'test.test_codecs_not_a_string') + result = '\u1234'.encode('ascii', 'test.test_codecs_not_a_string') + assert result == b'foo' From noreply at buildbot.pypy.org Wed Mar 26 02:41:22 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 26 Mar 2014 02:41:22 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Removed the global model.w_nil instance. Only one nil-instance per objectspace. Message-ID: <20140326014122.EF6CC1C066C@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r697:87df1c2dc1ae Date: 2014-03-26 02:40 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/87df1c2dc1ae/ Log: Removed the global model.w_nil instance. Only one nil-instance per objectspace. Had to fix tests because they were mixing different object space instances. Also refactored tests to remove code duplication and clean up. All green. Removed test_strategies.py for now. diff too long, truncating to 2000 out of 2059 lines diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -161,7 +161,7 @@ else: w_selector = selector - w_method = model.W_CompiledMethod(header=512) + w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) assert len(arguments_w) <= 7 w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethod diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -558,7 +558,7 @@ self.store(space, i, collection[i]) i = i+1 while i < my_length: - self.store(space, i, w_nil) + self.store(space, i, space.w_nil) i = i+1 def at0(self, space, index0): @@ -1090,9 +1090,9 @@ _shadow = None # Default value _likely_methodname = "" - def __init__(self, bytecount=0, header=0): + def __init__(self, space, bytecount=0, header=0): self._shadow = None - self.setheader(header) + self.setheader(space, header) self.bytes = ["\x00"] * bytecount def fillin(self, space, g_self): @@ -1117,7 +1117,7 @@ return True def clone(self, space): - copy = W_CompiledMethod(0, self.getheader()) + copy = W_CompiledMethod(space, 0, self.getheader()) copy.bytes = list(self.bytes) copy.literals = list(self.literals) return copy @@ -1198,10 +1198,10 @@ def getheader(self): return self.header - def setheader(self, header): + def setheader(self, space, header): primitive, literalsize, islarge, tempsize, argsize = constants.decode_compiled_method_header(header) self.literalsize = literalsize - self.literals = [w_nil] * self.literalsize + self.literals = [space.w_nil] * self.literalsize self.header = header self.argsize = argsize self.tempsize = tempsize @@ -1233,7 +1233,7 @@ def literalatput0(self, space, index0, w_value): if index0 == 0: header = space.unwrap_int(w_value) - self.setheader(header) + self.setheader(space, header) else: self.literals[index0-1] = w_value if self.has_shadow(): @@ -1283,10 +1283,3 @@ def __init__(self, old_shadow, new_shadow_class): self.old_shadow = old_shadow self.new_shadow_class = new_shadow_class - -# Use black magic to create w_nil without running the constructor, -# thus allowing it 
to be used even in the constructor of its own -# class. Note that we patch its class in the space -# YYY there should be no global w_nil -w_nil = instantiate(W_PointersObject) -w_nil.w_class = None diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -9,8 +9,17 @@ class ObjSpace(object): def __init__(self): self.classtable = {} + self.objtable = {} self._executable_path = [""] # XXX: we cannot set the attribute # directly on the frozen objectspace + + # Create the nil object. + # Circumvent the constructor because nil is already referenced there. + w_nil = instantiate(model.W_PointersObject) + w_nil.w_class = None + w_nil.space = self + self.add_bootstrap_object("w_nil", w_nil) + self.make_bootstrap_classes() self.make_bootstrap_objects() @@ -43,38 +52,38 @@ setattr(self, name, cls) # Make sure that all prebuilt classes are actually used in the special classes array + def add_bootstrap_object(self, name, obj): + self.objtable[name] = obj + setattr(self, name, obj) + + def make_bootstrap_object(self, name): + obj = model.W_PointersObject(self, None, 0) + self.add_bootstrap_object(name, obj) + def make_bootstrap_objects(self): - def bld_char(i): + self.make_bootstrap_object("w_true") + self.make_bootstrap_object("w_false") + self.make_bootstrap_object("w_special_selectors") + self.add_bootstrap_object("w_minus_one", model.W_SmallInteger(-1)) + self.add_bootstrap_object("w_zero", model.W_SmallInteger(0)) + self.add_bootstrap_object("w_one", model.W_SmallInteger(1)) + self.add_bootstrap_object("w_two", model.W_SmallInteger(2)) + + def build_char(i): # TODO - This is pretty hacky, maybe not required? At least eliminate the constant 1. w_cinst = model.W_PointersObject(self, self.w_Character, 1) w_cinst.store(self, constants.CHARACTER_VALUE_INDEX, model.W_SmallInteger(i)) return w_cinst - w_charactertable = model.W_PointersObject(self, self.classtable['w_Array'], 256) - self.w_charactertable = w_charactertable + char_table = model.W_PointersObject(self, self.classtable['w_Array'], 256) for i in range(256): - self.w_charactertable.store(self, i, bld_char(i)) - - w_nil = self.w_nil = model.w_nil - w_nil.space = self - w_true = model.W_PointersObject(self, None, 0) - self.w_true = w_true - w_false = model.W_PointersObject(self, None, 0) - self.w_false = w_false - self.w_minus_one = model.W_SmallInteger(-1) - self.w_zero = model.W_SmallInteger(0) - self.w_one = model.W_SmallInteger(1) - self.w_two = model.W_SmallInteger(2) - w_special_selectors = model.W_PointersObject(self, None, 0) - self.w_special_selectors = w_special_selectors - - self.objtable = {} + char_table.store(self, i, build_char(i)) + self.add_bootstrap_object("w_charactertable", char_table) + for name in constants.objects_in_special_object_table: name = "w_" + name - try: - self.objtable[name] = locals()[name] - except KeyError, e: - self.objtable[name] = None + if not name in self.objtable: + self.add_bootstrap_object(name, None) @specialize.arg(1) def get_special_selector(self, selector): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -593,7 +593,7 @@ @expose_primitive(NEW_METHOD, unwrap_spec=[object, int, int]) def func(interp, s_frame, w_class, bytecount, header): # We ignore w_class because W_CompiledMethod is special - w_method = model.W_CompiledMethod(bytecount, header) + w_method = model.W_CompiledMethod(s_frame.space, bytecount, header) return w_method # 
___________________________________________________________________________ diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -48,7 +48,7 @@ self.initialize_storage(space, size) def initialize_storage(self, space, size): - self.storage = [model.w_nil] * size + self.storage = [space.w_nil] * size def fetch(self, n0): return self.storage[n0] def store(self, n0, w_value): @@ -66,11 +66,11 @@ def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) - self.storage = [weakref.ref(model.w_nil)] * size + self.storage = [weakref.ref(space.w_nil)] * size def fetch(self, n0): weakobj = self.storage[n0] - return weakobj() or model.w_nil + return weakobj() or self.space.w_nil def store(self, n0, w_value): assert w_value is not None self.storage[n0] = weakref.ref(w_value) @@ -200,7 +200,7 @@ self.changed() def store_w_superclass(self, w_class): - if w_class is None or w_class.is_same_object(model.w_nil): + if w_class is None or w_class.is_same_object(self.space.w_nil): self._s_superclass = None else: assert isinstance(w_class, model.W_PointersObject) @@ -241,7 +241,7 @@ elif self.instance_kind == BYTES: w_new = model.W_BytesObject(self.space, w_cls, extrasize) elif self.instance_kind == COMPILED_METHOD: - w_new = model.W_CompiledMethod(extrasize) + w_new = model.W_CompiledMethod(self.space, extrasize) elif self.instance_kind == FLOAT: w_new = model.W_Float(0) # Squeak gives a random piece of memory elif self.instance_kind == LARGE_POSITIVE_INTEGER: @@ -413,6 +413,7 @@ # raise ClassShadowError("bogus selector in method dict") w_compiledmethod = w_values.fetch(self.space, i) if not isinstance(w_compiledmethod, model.W_CompiledMethod): + import pdb; pdb.set_trace() raise ClassShadowError("The methoddict must contain " "CompiledMethods only, for now. " "If the value observed is nil, our " diff --git a/spyvm/strategies.py b/spyvm/strategies.py --- a/spyvm/strategies.py +++ b/spyvm/strategies.py @@ -99,9 +99,9 @@ strategy_tag = 'allnil' def can_contain(self, space, w_obj): - return w_obj == model.w_nil + return w_obj == space.w_nil def fetch(self, space, w_obj, n0): - return model.w_nil + return space.w_nil def do_store(self, space, w_obj, n0, w_val): pass @@ -129,7 +129,7 @@ # TODO enable generalization by maintaining a counter of elements that are nil. 
self.storage(w_obj)[n0] = w_val def initial_storage(self, space, size): - return [model.w_nil] * size + return [space.w_nil] * size def storage_for_list(self, space, collection): return [x for x in collection] def copy_storage_from(self, space, w_obj, reuse_storage=False): @@ -147,7 +147,7 @@ return longlong2float.float2longlong(val) == self.nil_value_longlong def can_contain(self, space, w_val): - return w_val == model.w_nil or \ + return w_val == space.w_nil or \ (isinstance(w_val, self.wrapper_class) \ and not self.is_nil_value(self.unwrap(space, w_val))) @@ -160,7 +160,7 @@ def do_store(self, space, w_obj, n0, w_val): store = self.storage(w_obj) - if w_val == model.w_nil: + if w_val == space.w_nil: store[n0] = self.nil_value else: store[n0] = self.unwrap(space, w_val) @@ -172,7 +172,7 @@ length = len(collection) store = self.initial_storage(space, length) for i in range(length): - if collection[i] != model.w_nil: + if collection[i] != space.w_nil: store[i] = self.unwrap(space, collection[i]) return store diff --git a/spyvm/test/test_bitblt.py b/spyvm/test/test_bitblt.py --- a/spyvm/test/test_bitblt.py +++ b/spyvm/test/test_bitblt.py @@ -1,28 +1,9 @@ from spyvm import model, shadow, constants, interpreter, objspace from spyvm.plugins import bitblt -from .util import BootstrappedObjSpace +from .util import create_space -space = BootstrappedObjSpace() - -# copy from test_miniimage -def w(any): - # XXX could put this on the space? - if any is None: - return space.w_nil - if isinstance(any, str): - # assume never have strings of length 1 - if len(any) == 1: - return space.wrap_chr(any) - else: - return space.wrap_string(any) - if isinstance(any, bool): - return space.wrap_bool(any) - if isinstance(any, int): - return space.wrap_int(any) - if isinstance(any, float): - return space.wrap_float(any) - else: - raise Exception +space = create_space() +w = space.w def make_form(bits, width, height, depth, o_x=0, o_y=0): w_f = model.W_PointersObject(space, space.w_Array, 5) @@ -37,7 +18,6 @@ return w_f def test_bitBlt_values(): - w_bb = model.W_PointersObject(space, space.w_Array, 15) w_bb.store(space, 0, make_form([0] * 1230 * 20, 1230, 20, 1)) w_bb.store(space, 1, w_bb.fetch(space, 0)) diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -1,33 +1,18 @@ import py from spyvm import squeakimage, model, constants from spyvm import interpreter, shadow -from spyvm.test import test_miniimage as tools -from spyvm.test.test_miniimage import perform, w +from .util import read_image def setup(): - tools.setup_module(tools, filename='bootstrapped.image') - test_initialize_string_class() - -def find_symbol_in_methoddict_of(string, s_class): - s_methoddict = s_class.s_methoddict() - s_methoddict.sync_method_cache() - methoddict_w = s_methoddict.methoddict - for each in methoddict_w.keys(): - if each.as_string() == string: - return each - -def initialize_class(w_class): - initialize_symbol = find_symbol_in_methoddict_of("initialize", - w_class.class_shadow(tools.space)) - perform(w_class, initialize_symbol) - -def test_initialize_string_class(): - #initialize String class, because equality testing requires a class var set. 
- initialize_class(w("string").getclass(tools.space)) + import spyvm.test.test_bootstrappedimage as mod + mod.space, mod.interp, mod.image, mod.reader = read_image("bootstrapped.image") + mod.w = space.w + mod.perform = interp.perform + mod.space.initialize_class(mod.space.w_String, mod.interp) def test_symbol_asSymbol(): - w_result = perform(tools.image.w_asSymbol, "asSymbol") - assert w_result is tools.image.w_asSymbol + w_result = perform(image.w_asSymbol, "asSymbol") + assert w_result is image.w_asSymbol def test_create_new_symbol(): py.test.skip("This test takes quite long and is actually included in test_retrieve_symbol.") @@ -50,5 +35,7 @@ assert w_result is w_anotherSymbol def test_all_pointers_are_valid(): - tools.test_all_pointers_are_valid() - tools.test_lookup_abs_in_integer() + from test_miniimage import _test_all_pointers_are_valid + from test_miniimage import _test_lookup_abs_in_integer + _test_all_pointers_are_valid(reader) + _test_lookup_abs_in_integer(interp) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,16 +1,16 @@ -import py -from .util import bootstrap_class as _bootstrap_class -from spyvm import model, interpreter, primitives, shadow -from spyvm import objspace, wrapper, constants -from .util import BootstrappedObjSpace +import py, operator, sys +from spyvm import model, interpreter, primitives, shadow, objspace, wrapper, constants +from .util import create_space_interp +from spyvm.wrapper import PointWrapper +from spyvm.conftest import option -def bootstrap_class(space, instsize, w_superclass=None, w_metaclass=None, +space, interp = create_space_interp() + +def bootstrap_class(instsize, w_superclass=None, w_metaclass=None, name='?', format=shadow.POINTERS, varsized=True): - return _bootstrap_class(space, instsize, w_superclass, w_metaclass, + return space.bootstrap_class(instsize, w_superclass, w_metaclass, name, format, varsized) -space = BootstrappedObjSpace() -interp = interpreter.Interpreter(space) def step_in_interp(ctxt): # due to missing resets in between tests interp._loop = False try: @@ -45,7 +45,7 @@ # Install faked compiled methods that just invoke the primitive: for (w_class, primnum, argsize, methname) in methods: s_class = w_class.as_class_get_shadow(space) - prim_meth = model.W_CompiledMethod(0) + prim_meth = model.W_CompiledMethod(space, 0) prim_meth.primitive = primnum prim_meth.argsize = argsize symbol = fakesymbol(methname) @@ -88,19 +88,24 @@ return lit return [fakeliteral(lit) for lit in literals] -def new_frame(bytes, receiver=space.w_nil, space=space): +def _new_frame(space, bytes, receiver=None): assert isinstance(bytes, str) - w_method = model.W_CompiledMethod(len(bytes)) + w_method = model.W_CompiledMethod(space, len(bytes)) w_method.islarge = 1 w_method.bytes = bytes w_method.argsize=2 w_method.tempsize=8 w_method.setliterals([model.W_PointersObject(space, None, 2)]) + if receiver is None: + receiver = space.w_nil s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, receiver, ["foo", "bar"]) return s_frame.w_self(), s_frame +def new_frame(bytes, receiver=space.w_nil, space=space): + return _new_frame(space, bytes, receiver) + def test_create_frame(): - w_method = model.W_CompiledMethod(len("hello")) + w_method = model.W_CompiledMethod(space, len("hello")) w_method.bytes="hello" w_method.islarge = 1 w_method.argsize=2 @@ -145,7 +150,7 @@ def test_pushReceiverVariableBytecode(bytecode = 
(pushReceiverVariableBytecode(0) + pushReceiverVariableBytecode(1) + pushReceiverVariableBytecode(2))): - w_demo = bootstrap_class(space, 3).as_class_get_shadow(space).new() + w_demo = bootstrap_class(3).as_class_get_shadow(space).new() w_demo.store(space, 0, "egg") w_demo.store(space, 1, "bar") w_demo.store(space, 2, "baz") @@ -180,7 +185,7 @@ fakesymbol("c")] def test_pushLiteralVariableBytecode(bytecode=pushLiteralVariableBytecode(0)): - w_association = bootstrap_class(space, 2).as_class_get_shadow(space).new() + w_association = bootstrap_class(2).as_class_get_shadow(space).new() w_association.store(space, 0, "mykey") w_association.store(space, 1, "myvalue") w_frame, s_frame = new_frame(bytecode) @@ -190,7 +195,7 @@ def test_storeAndPopReceiverVariableBytecode(bytecode=storeAndPopReceiverVariableBytecode, popped=True): - shadow = bootstrap_class(space, 8).as_class_get_shadow(space) + shadow = bootstrap_class(8).as_class_get_shadow(space) for index in range(8): w_object = shadow.new() w_frame, s_frame = new_frame(pushConstantTrueBytecode + bytecode(index)) @@ -363,8 +368,8 @@ assert s_frame.stack() == [] def test_bytecodePrimNew(): - w_fakeclassclass = bootstrap_class(space, 10, name='fakeclassclass') - w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=False, + w_fakeclassclass = bootstrap_class(10, name='fakeclassclass') + w_fakeclass = bootstrap_class(1, name='fakeclass', varsized=False, w_metaclass=w_fakeclassclass) w_frame, s_frame = new_frame(bytecodePrimNew) s_frame.push(w_fakeclass) @@ -378,8 +383,8 @@ assert w_fakeinst.size() == 1 def test_bytecodePrimNewWithArg(): - w_fakeclassclass = bootstrap_class(space, 10, name='fakeclassclass') - w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=True, + w_fakeclassclass = bootstrap_class(10, name='fakeclassclass') + w_fakeclass = bootstrap_class(1, name='fakeclass', varsized=True, w_metaclass=w_fakeclassclass) w_frame, s_frame = new_frame(bytecodePrimNewWithArg) s_frame.push(w_fakeclass) @@ -394,7 +399,7 @@ assert w_fakeinst.size() == 3 def test_bytecodePrimSize(): - w_fakeclass = bootstrap_class(space, 2, name='fakeclass', varsized=True) + w_fakeclass = bootstrap_class(2, name='fakeclass', varsized=True) w_fakeinst = w_fakeclass.as_class_get_shadow(space).new(5) w_frame, s_frame = new_frame(bytecodePrimSize) s_frame.push(w_fakeinst) @@ -416,7 +421,7 @@ (returnNil, space.w_nil), (returnTopFromMethod, space.w_one) ]: shadow = w_class.as_class_get_shadow(space) - w_method = model.W_CompiledMethod(2) + w_method = model.W_CompiledMethod(space, 2) w_method.bytes = pushConstantOneBytecode + bytecode literals = fakeliterals(space, "foo") w_foo = literals[0] @@ -438,14 +443,14 @@ assert s_active_context.stack() == [result] def test_sendLiteralSelectorBytecode(): - w_class = bootstrap_class(space, 0) + w_class = bootstrap_class(0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, sendLiteralSelectorBytecode(0)) def test_fibWithArgument(): bytecode = ''.join(map(chr, [ 16, 119, 178, 154, 118, 164, 11, 112, 16, 118, 177, 224, 112, 16, 119, 177, 224, 176, 124 ])) - shadow = bootstrap_class(space, 0).as_class_get_shadow(space) - method = model.W_CompiledMethod(len(bytecode)) + shadow = bootstrap_class(0).as_class_get_shadow(space) + method = model.W_CompiledMethod(space, len(bytecode)) method.literalsize = 1 method.bytes = bytecode method.argsize = 1 @@ -487,7 +492,6 @@ step_in_interp(s_frame) step_in_interp(s_frame) w_point = s_frame.top() - from spyvm.wrapper import PointWrapper 
point = PointWrapper(interp.space, w_point) assert point.x() == 0 assert point.y() == 1 @@ -563,7 +567,7 @@ test_pushLiteralVariableBytecode(extendedPushBytecode + chr((3<<6) + 0)) def storeAssociation(bytecode): - w_association = bootstrap_class(space, 2).as_class_get_shadow(space).new() + w_association = bootstrap_class(2).as_class_get_shadow(space).new() w_association.store(space, 0, "mykey") w_association.store(space, 1, "myvalue") w_frame, s_frame = new_frame(pushConstantOneBytecode + bytecode) @@ -585,8 +589,8 @@ def test_callPrimitiveAndPush_fallback(): w_frame, s_frame = new_frame(bytecodePrimAdd) - shadow = bootstrap_class(space, 0).as_class_get_shadow(space) - w_method = model.W_CompiledMethod(0) + shadow = bootstrap_class(0).as_class_get_shadow(space) + w_method = model.W_CompiledMethod(space, 0) w_method.argsize = 1 w_method.tempsize = 1 w_method.literalsize = 1 @@ -621,30 +625,30 @@ space.w_false, space.w_true] def test_singleExtendedSendBytecode(): - w_class = bootstrap_class(space, 0) + w_class = bootstrap_class(0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, singleExtendedSendBytecode + chr((0<<5)+0)) def test_singleExtendedSuperBytecode(bytecode=singleExtendedSuperBytecode + chr((0<<5) + 0)): - w_supersuper = bootstrap_class(space, 0) - w_super = bootstrap_class(space, 0, w_superclass=w_supersuper) - w_class = bootstrap_class(space, 0, w_superclass=w_super) + w_supersuper = bootstrap_class(0) + w_super = bootstrap_class(0, w_superclass=w_supersuper) + w_class = bootstrap_class(0, w_superclass=w_super) w_object = w_class.as_class_get_shadow(space).new() # first call method installed in w_class bytecodes = singleExtendedSendBytecode + chr(0) # which does a call to its super - meth1 = model.W_CompiledMethod(2) + meth1 = model.W_CompiledMethod(space, 2) meth1.bytes = pushReceiverBytecode + bytecode literals = fakeliterals(space, "foo") foo = literals[0] meth1.setliterals(literals) w_class.as_class_get_shadow(space).installmethod(foo, meth1) # and that one again to its super - meth2 = model.W_CompiledMethod(2) + meth2 = model.W_CompiledMethod(space, 2) meth2.bytes = pushReceiverBytecode + bytecode meth2.setliterals(fakeliterals(space, foo)) w_super.as_class_get_shadow(space).installmethod(foo, meth2) - meth3 = model.W_CompiledMethod(0) + meth3 = model.W_CompiledMethod(space, 0) w_supersuper.as_class_get_shadow(space).installmethod(foo, meth3) w_frame, s_frame = new_frame(bytecodes) s_frame.w_method().setliterals(literals) @@ -665,12 +669,12 @@ assert s_caller_context.stack() == [] def test_secondExtendedSendBytecode(): - w_class = bootstrap_class(space, 0) + w_class = bootstrap_class(0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, secondExtendedSendBytecode + chr(0)) def test_doubleExtendedDoAnythinBytecode(): - w_class = bootstrap_class(space, 0) + w_class = bootstrap_class(0) w_object = w_class.as_class_get_shadow(space).new() sendBytecodesTest(w_class, w_object, doubleExtendedDoAnythingBytecode + chr((0<<5) + 0) + chr(0)) @@ -800,7 +804,7 @@ def test_bc_primBytecodeAt_with_instvars(): # ^ self at: 1 - w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=True) + w_fakeclass = bootstrap_class(1, name='fakeclass', varsized=True) w_fakeinst = w_fakeclass.as_class_get_shadow(space).new(1) w_fakeinst.store(space, 0, space.wrap_char("a")) # static slot 0: instance variable w_fakeinst.store(space, 1, space.wrap_char("b")) # varying slot 1 @@ -815,7 +819,7 @@ def 
test_bc_primBytecodeAtPut_with_instvars(): # ^ self at: 1 put: #b - w_fakeclass = bootstrap_class(space, 1, name='fakeclass', varsized=True) + w_fakeclass = bootstrap_class(1, name='fakeclass', varsized=True) w_fakeinst = w_fakeclass.as_class_get_shadow(space).new(1) w_fakeinst.store(space, 0, space.wrap_char("a")) # static slot 0: instance variable w_fakeinst.store(space, 1, space.wrap_char("a")) # varying slot 1 @@ -835,7 +839,7 @@ # ^ self objectAt: 2. yields the first literal (22) # ^ self objectAt: 2 put: 3. changes the first literal to 3 # ^ self objectAt: 2. yields the new first literal (3) - prim_meth = model.W_CompiledMethod(header=1024) + prim_meth = model.W_CompiledMethod(space, header=1024) prim_meth.setliterals(fakeliterals(space, 22)) oal = fakeliterals(space, "objectAt:") oalp = fakeliterals(space, "objectAt:put:", 3) @@ -855,7 +859,6 @@ def test_runwithtrace(): # We run random tests with the bc_trace option turned on explicitely - from spyvm.conftest import option bc_trace = option.bc_trace option.bc_trace = True test_storeAndPopReceiverVariableBytecode() @@ -969,14 +972,13 @@ # ifTrue: [ 0 ] # ifFalse: [ (testBlock value: aNumber - 1) + aNumber ]]. # ^ testBlock value: 11 - import operator interp = interpreter.Interpreter(space, max_stack_depth=3) #create a method with the correct bytecodes and a literal bytes = reduce(operator.add, map(chr, [0x8a, 0x01, 0x68, 0x10, 0x8f, 0x11, 0x00, 0x11, 0x10, 0x75, 0xb6, 0x9a, 0x75, 0xa4, 0x09, 0x8c, 0x00, 0x01, 0x10, 0x76, 0xb1, 0xca, 0x10, 0xb0, 0x7d, 0x8e, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x20, 0xca, 0x7c])) - w_method = model.W_CompiledMethod(len(bytes)) + w_method = model.W_CompiledMethod(space, len(bytes)) w_method.islarge = 1 w_method.bytes = bytes w_method.argsize=0 @@ -1002,7 +1004,6 @@ class StackTestInterpreter(interpreter.Interpreter): def stack_frame(self, w_frame, may_interrupt=True): - import sys stack_depth = self.max_stack_depth - self.remaining_stack_depth for i in range(stack_depth + 1): assert sys._getframe(4 + i * 6).f_code.co_name == 'c_loop' @@ -1016,7 +1017,6 @@ # ifTrue: [ 2 ] # ifFalse: [ (testBlock value: aNumber - 1) + aNumber ]]. 
# ^ testBlock value: 11 - import operator interp = StackTestInterpreter(space, max_stack_depth=10) #create a method with the correct bytecodes and a literal bytes = reduce(operator.add, map(chr, [0x8a, 0x01, 0x68, 0x10, 0x8f, 0x11, @@ -1024,7 +1024,7 @@ 0x10, 0x76, 0xb1, 0xca, 0x10, 0xb0, 0x7d, 0x8e, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x20, 0xca, 0x7c])) - w_method = model.W_CompiledMethod(len(bytes)) + w_method = model.W_CompiledMethod(space, len(bytes)) w_method.islarge = 1 w_method.bytes = bytes w_method.argsize=0 @@ -1041,7 +1041,6 @@ assert False def test_c_stack_reset_on_sender_chain_manipulation(): - import operator bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) w_frame, s_frame = new_frame(bytes) s_frame.store_w_receiver(w_frame) diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -1,38 +1,20 @@ -import py -import operator -from spyvm import squeakimage, model, constants, error -from spyvm import interpreter, shadow, primitives -from spyvm.test import test_miniimage as tools -from spyvm.test.test_miniimage import perform, w +import py, operator +from spyvm import squeakimage, model, constants, error, interpreter, shadow, primitives from spyvm.test.test_primitives import MockFrame - +from .util import read_image, find_symbol_in_methoddict_of from rpython.rlib.rarithmetic import intmask, r_uint -space, interp = tools.setup_module(tools, filename='bootstrapped.image') - -def find_symbol_in_methoddict_of(string, s_class): - s_methoddict = s_class.s_methoddict() - s_methoddict.sync_method_cache() - methoddict_w = s_methoddict.methoddict - for each in methoddict_w.keys(): - if each.as_string() == string: - return each - -def initialize_class(w_class): - initialize_symbol = find_symbol_in_methoddict_of("initialize", - w_class.class_shadow(tools.space)) - perform(w_class, initialize_symbol) - -def test_initialize_string_class(): - interp.trace = False - #initialize String class, because equality testing requires a class var set. 
- initialize_class(w("string").getclass(tools.space)) +space, interp, _, _ = read_image('bootstrapped.image') +w = space.w +perform = interp.perform +interp.trace = False +space.initialize_class(space.w_String, interp) def perform_primitive(rcvr, w_selector, *args): code = rcvr.class_shadow(space).lookup(w_selector).primitive() assert code func = primitives.prim_holder.prim_table[code] - s_frame = MockFrame([rcvr] + list(args)).as_context_get_shadow(space) + s_frame = MockFrame(space, [rcvr] + list(args)).as_context_get_shadow(space) func(interp, s_frame, len(args)) return s_frame.pop() diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,31 +1,19 @@ -# ----- mini.image productline ------------------------------- -# NOT relying on order of methods -# using setup_module(module) now -import py -from spyvm import squeakimage, model, constants, interpreter, shadow, objspace -from .util import BootstrappedObjSpace -# lazy initialization of test data, ie ImageReader and Float class +import py, math +from spyvm import squeakimage, model, constants, interpreter, shadow, objspace, wrapper, primitives +from .util import read_image, open_reader -def setup_module(module, filename='mini.image'): - space = BootstrappedObjSpace() - from spyvm.tool.analyseimage import image_dir - module.mini_image = image_dir.join(filename) - module.reader = open_miniimage(space) - reader.initialize() - module.image = squeakimage.SqueakImage() - module.image.from_reader(space, reader) - module.space = space - module.interp = interpreter.Interpreter(space, image) - return space, module.interp +space, interp, image, reader = read_image("mini.image") +w = space.w +perform = interp.perform + +def open_miniimage(): + return open_reader(space, "mini.image") def find_symbol(name): if name == "asSymbol": return image.w_asSymbol return perform(space.wrap_string(name), "asSymbol") -def open_miniimage(space): - return squeakimage.reader_for_image(space, squeakimage.Stream(mini_image.open(mode="rb"))) - def get_reader(): return reader @@ -38,32 +26,30 @@ # ------ tests ------------------------------------------ -def test_miniimageexists(): - assert mini_image.check(dir=False) - def test_read_header(): - reader = open_miniimage(space) + reader = open_miniimage() reader.read_header() assert reader.endofmemory == 726592 assert reader.oldbaseaddress == -1221464064 assert reader.specialobjectspointer == -1221336216 def test_read_all_header(): - reader = open_miniimage(space) + reader = open_miniimage() reader.read_header() next = reader.stream.peek() assert next != 0 #expects object header, which must not be 0x00000000 - -def test_all_pointers_are_valid(): - reader = get_reader() +def _test_all_pointers_are_valid(reader): for each in reader.chunks.itervalues(): if each.format < 5: for pointer in each.data: if (pointer & 1) != 1: assert pointer in reader.chunks - +def test_all_pointers_are_valid(): + reader = get_reader() + _test_all_pointers_are_valid(reader) + def test_there_are_31_compact_classes(): reader = get_reader() assert len(reader.compactclasses) == 31 @@ -180,15 +166,15 @@ SO_LARGENEGATIVEINTEGER_CLASS = 42 """ - - -def test_lookup_abs_in_integer(): - w_abs = interp.perform(w("abs"), "asSymbol") +def _test_lookup_abs_in_integer(interp): + w_abs = interp.perform(interp.space.w("abs"), "asSymbol") for value in [10, -3, 0]: w_object = model.W_SmallInteger(value) w_res = interp.perform(w_object, w_abs) assert w_res.value == abs(value) 
+def test_lookup_abs_in_integer(): + _test_lookup_abs_in_integer(interp) def test_map_mirrors_to_classtable(): w_compiledmethod_class = image.special(constants.SO_COMPILEDMETHOD_CLASS) @@ -202,7 +188,6 @@ def test_runimage(): py.test.skip("This method actually runs an image. Fails since no graphical primitives yet") - from spyvm import wrapper ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) w_ctx = ap.suspended_context() ap.store_suspended_context(space.w_nil) @@ -218,26 +203,6 @@ perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) assert perform(w(10), "fib").is_same_object(w(89)) - -def w(any): - # XXX could put this on the space? - if any is None: - return space.w_nil - if isinstance(any, str): - # assume never have strings of length 1 - if len(any) == 1: - return space.wrap_chr(any) - else: - return space.wrap_string(any) - if isinstance(any, bool): - return space.wrap_bool(any) - if isinstance(any, int): - return space.wrap_int(any) - if isinstance(any, float): - return space.wrap_float(any) - else: - raise Exception - def test_become(): sourcecode = """ testBecome @@ -264,12 +229,7 @@ w_result = perform(w(10), "testBecome") assert space.unwrap_int(w_result) == 42 -def perform(w_receiver, selector, *arguments_w): - return interp.perform(w_receiver, selector, *arguments_w) - - def test_step_forged_image(): - from spyvm import wrapper ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) s_ctx = ap.suspended_context().as_context_get_shadow(space) assert isinstance(s_ctx, shadow.MethodContextShadow) @@ -305,7 +265,6 @@ assert w_res.size() == 0 def test_pi_as_w_float(): - import math w_result = perform(interp.space.w_Float, "pi") assert w_result is not None assert isinstance(w_result, model.W_Float) @@ -325,7 +284,6 @@ assert w_result.value == 1.1 def test_existing_large_positive_integer_as_W_LargePositiveInteger1Word(): - import math w_result = perform(interp.space.w_Float, "pi") assert w_result is not None assert isinstance(w_result, model.W_Float) @@ -355,9 +313,8 @@ assert w_dnu.as_string() == "doesNotUnderstand:" def test_run_doesNotUnderstand(): - from spyvm.test import test_miniimage - setup_module(test_miniimage, filename='running-something-mini.image') - w_result = test_miniimage.interp.perform(test_miniimage.interp.space.wrap_int(0), "runningADNU") + space, interp, _, _ = read_image('running-something-mini.image') + w_result = interp.perform(interp.space.wrap_int(0), "runningADNU") assert isinstance(w_result, model.W_BytesObject) assert w_result.as_string() == "foobarThis:doesNotExist:('pypy' 'heya' )" @@ -371,9 +328,7 @@ assert isinstance(w_message, model.W_PointersObject) def test_step_run_something(): - from spyvm.test import test_miniimage - setup_module(test_miniimage, filename='running-something-mini.image') - from spyvm import wrapper + space, interp, _, _ = read_image('running-something-mini.image') ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) w_ctx = ap.suspended_context() s_ctx = w_ctx.as_context_get_shadow(space) @@ -391,9 +346,7 @@ assert s_ctx.top().value == 3 def test_primitive_perform_with_args(): - # this test should be last, because importing test_primitives has some (unknown) side-effects - from spyvm.test.test_primitives import prim - from spyvm import primitives + from spyvm.test.test_primitives import _prim w_o = space.wrap_list([1, 2, 3]) w_methoddict = w_o.class_shadow(space)._s_superclass._s_superclass.w_methoddict() 
w_methoddict.as_methoddict_get_shadow(space).sync_method_cache() @@ -402,5 +355,5 @@ for sel in selectors_w: if sel.as_string() == 'size': w_sel = sel - size = prim(primitives.PERFORM_WITH_ARGS, [w_o, w_sel, []]) + size = _prim(space, primitives.PERFORM_WITH_ARGS, [w_o, w_sel, []]) assert size.value == 3 diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -1,13 +1,12 @@ -import py -import math -import socket -from .util import bootstrap_class, BootstrappedObjSpace -from spyvm import model, shadow -from spyvm.shadow import MethodNotFound -from spyvm import objspace, error, display +import py, math, socket +from spyvm import model, shadow, objspace, error, display +from spyvm.shadow import MethodNotFound, WEAK_POINTERS from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rtyper.lltypesystem import lltype, rffi +from .util import create_space -space = BootstrappedObjSpace() +space = create_space() +bootstrap_class = space.bootstrap_class w_foo = space.wrap_string("foo") w_bar = space.wrap_string("bar") @@ -18,16 +17,15 @@ result += each return result - def test_new(): - w_mycls = bootstrap_class(space, 0) + w_mycls = bootstrap_class(0) w_myinstance = w_mycls.as_class_get_shadow(space).new() assert isinstance(w_myinstance, model.W_PointersObject) assert w_myinstance.getclass(space).is_same_object(w_mycls) assert w_myinstance.class_shadow(space) is w_mycls.as_class_get_shadow(space) def test_new_namedvars(): - w_mycls = bootstrap_class(space, 3) + w_mycls = bootstrap_class(3) w_myinstance = w_mycls.as_class_get_shadow(space).new() assert isinstance(w_myinstance, model.W_PointersObject) assert w_myinstance.getclass(space).is_same_object(w_mycls) @@ -37,7 +35,7 @@ assert w_myinstance.fetch(space, 1) is w_myinstance def test_bytes_object(): - w_class = bootstrap_class(space, 0, format=shadow.BYTES) + w_class = bootstrap_class(0, format=shadow.BYTES) w_bytes = w_class.as_class_get_shadow(space).new(20) assert w_bytes.getclass(space).is_same_object(w_class) assert w_bytes.size() == 20 @@ -49,7 +47,7 @@ py.test.raises(IndexError, lambda: w_bytes.getchar(20)) def test_c_bytes_object(): - w_class = bootstrap_class(space, 0, format=shadow.BYTES) + w_class = bootstrap_class(0, format=shadow.BYTES) w_bytes = w_class.as_class_get_shadow(space).new(20) w_bytes.convert_to_c_layout() assert w_bytes.getclass(space).is_same_object(w_class) @@ -62,7 +60,7 @@ py.test.raises(IndexError, lambda: w_bytes.getchar(20)) def test_word_object(): - w_class = bootstrap_class(space, 0, format=shadow.WORDS) + w_class = bootstrap_class(0, format=shadow.WORDS) w_bytes = w_class.as_class_get_shadow(space).new(20) assert w_bytes.getclass(space).is_same_object(w_class) assert w_bytes.size() == 20 @@ -74,7 +72,7 @@ py.test.raises(AssertionError, lambda: w_bytes.getword(20)) def test_c_word_object(): - w_class = bootstrap_class(space, 0, format=shadow.WORDS) + w_class = bootstrap_class(0, format=shadow.WORDS) w_bytes = w_class.as_class_get_shadow(space).new(20) w_bytes.convert_to_c_layout() assert w_bytes.getclass(space).is_same_object(w_class) @@ -92,11 +90,11 @@ self.val = val def as_compiledmethod_get_shadow(self, space): return self.val - w_class = bootstrap_class(space, mockmethod(0)) + w_class = bootstrap_class(mockmethod(0)) shadow = w_class.as_class_get_shadow(space) shadow.installmethod(w_foo, mockmethod(1)) shadow.installmethod(w_bar, mockmethod(2)) - w_subclass = bootstrap_class(space, 0, w_superclass=w_class) + w_subclass = 
bootstrap_class(0, w_superclass=w_class) subshadow = w_subclass.as_class_get_shadow(space) assert subshadow.s_superclass() is shadow subshadow.installmethod(w_foo, mockmethod(3)) @@ -110,23 +108,23 @@ py.test.raises(MethodNotFound, subshadow.lookup, "zork") def test_w_compiledin(): - w_super = bootstrap_class(space, 0) - w_class = bootstrap_class(space, 0, w_superclass=w_super) + w_super = bootstrap_class(0) + w_class = bootstrap_class(0, w_superclass=w_super) supershadow = w_super.as_class_get_shadow(space) - supershadow.installmethod(w_foo, model.W_CompiledMethod(0)) + supershadow.installmethod(w_foo, model.W_CompiledMethod(space, 0)) classshadow = w_class.as_class_get_shadow(space) classshadow.initialize_methoddict() assert classshadow.lookup(w_foo).w_compiledin is w_super def test_compiledmethod_setchar(): - w_method = model.W_CompiledMethod(3) + w_method = model.W_CompiledMethod(space, 3) w_method.setchar(0, "c") assert w_method.bytes == list("c\x00\x00") def test_hashes(): w_five = model.W_SmallInteger(5) assert w_five.gethash() == 5 - w_class = bootstrap_class(space, 0) + w_class = bootstrap_class(0) w_inst = w_class.as_class_get_shadow(space).new() assert w_inst.hash == w_inst.UNASSIGNED_HASH h1 = w_inst.gethash() @@ -135,7 +133,7 @@ assert h1 == w_inst.hash def test_compiledmethod_at0(): - w_method = model.W_CompiledMethod() + w_method = model.W_CompiledMethod(space, ) w_method.bytes = list("abc") w_method.header = 100 w_method.setliterals(['lit1', 'lit2']) @@ -148,7 +146,7 @@ assert space.unwrap_int(w_method.at0(space, 14)) == ord('c') def test_compiledmethod_atput0(): - w_method = model.W_CompiledMethod(3) + w_method = model.W_CompiledMethod(space, 3) newheader = joinbits([0,2,0,0,0,0],[9,8,1,6,4,1]) assert w_method.getliteralsize() == 0 w_method.atput0(space, 0, space.wrap_int(newheader)) @@ -167,7 +165,7 @@ def test_compiledmethod_atput0_not_aligned(): header = joinbits([0,2,0,0,0,0],[9,8,1,6,4,1]) - w_method = model.W_CompiledMethod(3, header) + w_method = model.W_CompiledMethod(space, 3, header) with py.test.raises(error.PrimitiveFailedError): w_method.atput0(space, 7, 'lit1') with py.test.raises(error.PrimitiveFailedError): @@ -210,10 +208,10 @@ test_not_is_same_object(space.wrap_char('d'), space.wrap_float(3.0)) def test_become_pointers(): - w_clsa = bootstrap_class(space, 3) + w_clsa = bootstrap_class(3) w_a = w_clsa.as_class_get_shadow(space).new() - w_clsb = bootstrap_class(space, 4) + w_clsb = bootstrap_class(4) w_b = w_clsb.as_class_get_shadow(space).new() hasha = w_a.gethash() @@ -234,9 +232,9 @@ assert w_a.fetch(space, 1) is w_a def test_become_with_shadow(): - w_clsa = bootstrap_class(space, 3) + w_clsa = bootstrap_class(3) s_clsa = w_clsa.as_class_get_shadow(space) - w_clsb = bootstrap_class(space, 4) + w_clsb = bootstrap_class(4) s_clsb = w_clsb.as_class_get_shadow(space) res = w_clsa.become(w_clsb) assert res @@ -357,7 +355,6 @@ # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent # double-free bug def get_pixelbuffer(self): - from rpython.rtyper.lltypesystem import lltype, rffi return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer d = display.SDLDisplay("test") @@ -382,7 +379,6 @@ def test_display_offset_computation(): def get_pixelbuffer(self): - from rpython.rtyper.lltypesystem import lltype, rffi return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer d = display.SDLDisplay("test") @@ -396,9 
+392,7 @@ @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): - from spyvm.shadow import WEAK_POINTERS - - w_cls = bootstrap_class(space, 1) + w_cls = bootstrap_class(1) s_cls = w_cls.as_class_get_shadow(space) s_cls.instance_kind = WEAK_POINTERS diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -1,9 +1,9 @@ -import py -import sys -from spyvm import objspace -from .util import BootstrappedObjSpace +import py, sys +from spyvm import objspace, model +from rpython.rlib.rarithmetic import r_uint +from .util import create_space -space = BootstrappedObjSpace() +space = create_space() def ismetaclass(w_cls): # Heuristic to detect if this is a metaclass. Don't use apart @@ -29,7 +29,6 @@ assert w_Metaclass.getclass(space).getclass(space) is w_Metaclass def test_ruint(): - from spyvm import model """ | a b | a := (9223372036854775808). @@ -41,7 +40,6 @@ => 27670116110564327424 """ - from rpython.rlib.rarithmetic import r_uint for num in [0, 1, 41, 100, 2**31, sys.maxint + 1, -1]: num = r_uint(num) assert space.unwrap_uint(space.wrap_uint(num)) == num diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -1,19 +1,22 @@ -import py -import os -import math -from .util import bootstrap_class +import py, os, math, time +from spyvm import model, shadow, interpreter, constants, primitives, objspace, wrapper, display from spyvm.primitives import prim_table, PrimitiveFailedError -from spyvm import model, shadow, interpreter -from spyvm import constants, primitives, objspace, wrapper, display from spyvm.plugins import bitblt -from .util import BootstrappedObjSpace +from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan +from rpython.rlib.rarithmetic import intmask +from rpython.rtyper.lltypesystem import lltype, rffi +from .util import create_space +from .test_interpreter import _new_frame -from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan +space = create_space() +wrap = space.w +bootstrap_class = space.bootstrap_class -space = BootstrappedObjSpace() +def new_frame(bytes): + return _new_frame(space, bytes, space.w_nil) class MockFrame(model.W_PointersObject): - def __init__(self, stack): + def __init__(self, space, stack): self.space = space size = 6 + len(stack) + 6 self.initialize_storage(space, size) @@ -30,38 +33,32 @@ self.shadow = shadow.BlockContextShadow(space, self) return self.shadow -def wrap(x): - if isinstance(x, int): return space.wrap_int(x) - if isinstance(x, float): return space.wrap_float(x) - if isinstance(x, model.W_Object): return x - if isinstance(x, str) and len(x) == 1: return space.wrap_char(x) - if isinstance(x, str): return space.wrap_string(x) - if isinstance(x, list): return space.wrap_list(x) - raise NotImplementedError - IMAGENAME = "anImage.image" -def mock(stack, context = None): - mapped_stack = [wrap(x) for x in stack] +def mock(space, stack, context = None): + mapped_stack = [space.w(x) for x in stack] if context is None: - frame = MockFrame(mapped_stack) + frame = MockFrame(space, mapped_stack) else: frame = context for i in range(len(stack)): frame.as_context_get_shadow(space).push(stack[i]) interp = interpreter.Interpreter(space, image_name=IMAGENAME) - return (interp, frame, len(stack)) + return interp, frame, len(stack) -def prim(code, stack, context = None): - interp, w_frame, argument_count = mock(stack, context) +def 
_prim(space, code, stack, context = None): + interp, w_frame, argument_count = mock(space, stack, context) prim_table[code](interp, w_frame.as_context_get_shadow(space), argument_count-1) res = w_frame.as_context_get_shadow(space).pop() s_frame = w_frame.as_context_get_shadow(space) assert not s_frame.stackdepth() - s_frame.tempsize() # check args are consumed return res +def prim(code, stack, context = None): + return _prim(space, code, stack, context) + def prim_fails(code, stack): - interp, w_frame, argument_count = mock(stack) + interp, w_frame, argument_count = mock(space, stack) orig_stack = list(w_frame.as_context_get_shadow(space).stack()) with py.test.raises(PrimitiveFailedError): prim_table[code](interp, w_frame.as_context_get_shadow(space), argument_count - 1) @@ -180,7 +177,6 @@ assert prim(primitives.BIT_SHIFT, [-4, 27]).value == -536870912 def test_small_int_bit_shift_fail(): - from rpython.rlib.rarithmetic import intmask prim_fails(primitives.BIT_SHIFT, [4, 32]) prim_fails(primitives.BIT_SHIFT, [4, 31]) w_result = prim(primitives.BIT_SHIFT, [4, 29]) @@ -220,7 +216,7 @@ assert prim(primitives.FLOAT_TIMES_TWO_POWER, [213.0, 1020]).value == float('inf') def test_at(): - w_obj = bootstrap_class(space, 0, varsized=True).as_class_get_shadow(space).new(1) + w_obj = bootstrap_class(0, varsized=True).as_class_get_shadow(space).new(1) foo = wrap("foo") w_obj.store(space, 0, foo) assert prim(primitives.AT, [w_obj, 1]) is foo @@ -231,11 +227,11 @@ assert prim(primitives.AT, [w_obj, 1]) == foo def test_invalid_at(): - w_obj = bootstrap_class(space, 0).as_class_get_shadow(space).new() + w_obj = bootstrap_class(0).as_class_get_shadow(space).new() prim_fails(primitives.AT, [w_obj, 1]) def test_at_put(): - w_obj = bootstrap_class(space, 0, varsized=1).as_class_get_shadow(space).new(1) + w_obj = bootstrap_class(0, varsized=1).as_class_get_shadow(space).new(1) assert prim(primitives.AT_PUT, [w_obj, 1, 22]).value == 22 assert prim(primitives.AT, [w_obj, 1]).value == 22 @@ -248,19 +244,19 @@ assert prim(primitives.AT, [w_str, 3]).value == ord('c') def test_invalid_at_put(): - w_obj = bootstrap_class(space, 0).as_class_get_shadow(space).new() + w_obj = bootstrap_class(0).as_class_get_shadow(space).new() prim_fails(primitives.AT_PUT, [w_obj, 1, 22]) def test_size(): - w_obj = bootstrap_class(space, 0, varsized=True).as_class_get_shadow(space).new(0) + w_obj = bootstrap_class(0, varsized=True).as_class_get_shadow(space).new(0) assert prim(primitives.SIZE, [w_obj]).value == 0 - w_obj = bootstrap_class(space, 3, varsized=True).as_class_get_shadow(space).new(5) + w_obj = bootstrap_class(3, varsized=True).as_class_get_shadow(space).new(5) assert prim(primitives.SIZE, [w_obj]).value == 5 def test_size_of_compiled_method(): literalsize = 3 bytecount = 3 - w_cm = model.W_CompiledMethod(bytecount) + w_cm = model.W_CompiledMethod(space, bytecount) w_cm.literalsize = literalsize assert prim(primitives.SIZE, [w_cm]).value == (literalsize+1)*constants.BYTES_PER_WORD + bytecount @@ -278,7 +274,7 @@ prim_fails(primitives.OBJECT_AT, ["q", constants.CHARACTER_VALUE_INDEX+2]) def test_invalid_object_at_put(): - w_obj = bootstrap_class(space, 1).as_class_get_shadow(space).new() + w_obj = bootstrap_class(1).as_class_get_shadow(space).new() prim_fails(primitives.OBJECT_AT_PUT, [w_obj, 2, 42]) def test_string_at_put(): @@ -343,7 +339,7 @@ def test_as_oop(): # I checked potato, and that returns the hash for as_oop - w_obj = bootstrap_class(space, 0).as_class_get_shadow(space).new() + w_obj = 
bootstrap_class(0).as_class_get_shadow(space).new() w_obj.hash = 22 assert prim(primitives.AS_OOP, [w_obj]).value == 22 @@ -430,14 +426,12 @@ assert equals_ttp(1.5,-1,0.75) def test_primitive_milliseconds_clock(): - import time start = prim(primitives.MILLISECOND_CLOCK, [0]).value time.sleep(0.3) stop = prim(primitives.MILLISECOND_CLOCK, [0]).value assert start + 250 <= stop def test_signal_at_milliseconds(): - import time future = prim(primitives.MILLISECOND_CLOCK, [0]).value + 400 sema = space.w_Semaphore.as_class_get_shadow(space).new() prim(primitives.SIGNAL_AT_MILLISECONDS, [space.w_nil, sema, future]) @@ -463,7 +457,6 @@ assert space.objtable["w_interrupt_semaphore"] is w_semaphore def test_seconds_clock(): - import time now = int(time.time()) w_smalltalk_now1 = prim(primitives.SECONDS_CLOCK, [42]) w_smalltalk_now2 = prim(primitives.SECONDS_CLOCK, [42]) @@ -484,7 +477,7 @@ def test_new_method(): bytecode = ''.join(map(chr, [ 16, 119, 178, 154, 118, 164, 11, 112, 16, 118, 177, 224, 112, 16, 119, 177, 224, 176, 124 ])) - shadow = bootstrap_class(space, 0).as_class_get_shadow(space) + shadow = bootstrap_class(0).as_class_get_shadow(space) w_method = prim(primitives.NEW_METHOD, [space.w_CompiledMethod, len(bytecode), 1025]) assert w_method.literalat0(space, 0).value == 1025 assert w_method.literalsize == 2 @@ -496,7 +489,7 @@ assert w_v.bytes == list(IMAGENAME) def test_clone(): - w_obj = bootstrap_class(space, 1, varsized=True).as_class_get_shadow(space).new(1) + w_obj = bootstrap_class(1, varsized=True).as_class_get_shadow(space).new(1) w_obj.atput0(space, 0, space.wrap_int(1)) w_v = prim(primitives.CLONE, [w_obj]) assert space.unwrap_int(w_v.at0(space, 0)) == 1 @@ -562,16 +555,12 @@ ) def test_directory_delimitor(): - import os.path w_c = prim(primitives.DIRECTORY_DELIMITOR, [1]) assert space.unwrap_char(w_c) == os.path.sep def test_primitive_closure_copyClosure(): - from test_interpreter import new_frame - w_frame, s_frame = new_frame("", - space=space) - w_outer_frame, s_initial_context = new_frame("", - space=space) + w_frame, s_frame = new_frame("") + w_outer_frame, s_initial_context = new_frame("") w_block = prim(primitives.CLOSURE_COPY_WITH_COPIED_VALUES, map(wrap, [w_outer_frame, 2, [wrap(1), wrap(2)]]), w_frame) assert w_block is not space.w_nil @@ -596,9 +585,7 @@ prim_fails(primitives.STRING_REPLACE, [['a', 'b'], 1, 4, "ccccc", 1]) def build_up_closure_environment(args, copiedValues=[]): - from test_interpreter import new_frame - w_frame, s_initial_context = new_frame("", - space=space) + w_frame, s_initial_context = new_frame("") size_arguments = len(args) closure = space.newClosure(w_frame, 4, #pc @@ -645,9 +632,7 @@ def test_primitive_next_instance(): someInstances = map(space.wrap_list, [[2], [3]]) - from test_interpreter import new_frame - w_frame, s_context = new_frame("", - space=space) + w_frame, s_context = new_frame("") s_context.push(space.w_Array) interp = interpreter.Interpreter(space) @@ -663,9 +648,7 @@ def test_primitive_next_instance_wo_some_instance_in_same_frame(): someInstances = map(space.wrap_list, [[2], [3]]) - from test_interpreter import new_frame - w_frame, s_context = new_frame("", - space=space) + w_frame, s_context = new_frame("") s_context.push(space.w_Array) interp = interpreter.Interpreter(space) @@ -689,9 +672,7 @@ def step(s_frame): raise Stepping - from test_interpreter import new_frame - w_frame, s_initial_context = new_frame("", - space=space) + w_frame, s_initial_context = new_frame("") closure = space.newClosure(w_frame, 4, 0, []) 
s_frame = w_frame.as_methodcontext_get_shadow(space) @@ -722,7 +703,6 @@ # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent # double-free bug def get_pixelbuffer(self): - from rpython.rtyper.lltypesystem import lltype, rffi return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer @@ -762,7 +742,6 @@ # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent # double-free bug def get_pixelbuffer(self): - from rpython.rtyper.lltypesystem import lltype, rffi return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer @@ -805,7 +784,7 @@ def sync_cache_mock(self): raise CallCopyBitsSimulation - interp, w_frame, argument_count = mock([mock_bitblt], None) + interp, w_frame, argument_count = mock(space, [mock_bitblt], None) if interp.image is None: interp.image = Image() diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -1,10 +1,11 @@ import random -from spyvm import model, shadow, constants, interpreter, objspace -from .util import BootstrappedObjSpace +from spyvm import model, shadow, constants, interpreter, objspace, wrapper +from .util import create_space +from test_model import joinbits -space = BootstrappedObjSpace() +space = create_space() -w_Object = space.classtable['w_Object'] +w_Object = space.classtable['w_Object'] w_Metaclass = space.classtable['w_Metaclass'] w_MethodDict = space.classtable['w_MethodDict'] w_Array = space.classtable['w_Array'] @@ -66,8 +67,8 @@ yield basicshape, "CompiledMeth", 0xE02, shadow.COMPILED_METHOD, True, 0 def test_methoddict(): - methods = {'foo': model.W_CompiledMethod(0), - 'bar': model.W_CompiledMethod(0)} + methods = {'foo': model.W_CompiledMethod(space, 0), + 'bar': model.W_CompiledMethod(space, 0)} w_class = build_smalltalk_class("Demo", 0x90, methods=methods) classshadow = w_class.as_class_get_shadow(space) methoddict = classshadow.s_methoddict().methoddict @@ -76,7 +77,7 @@ assert methods[w_key.as_string()].as_compiledmethod_get_shadow(space) is value def method(tempsize=3,argsize=2, bytes="abcde"): - w_m = model.W_CompiledMethod() + w_m = model.W_CompiledMethod(space, ) w_m.bytes = bytes w_m.tempsize = tempsize w_m.argsize = argsize @@ -156,7 +157,7 @@ def assert_contains_nils(w_obj): for i in range(w_obj.size()): - assert model.w_nil == w_obj.fetch(space, i) + assert space.w_nil == w_obj.fetch(space, i) def test_attach_mc(): w_m = method() @@ -180,10 +181,9 @@ assert s_object.fetch(1).value == 13 def test_compiledmethodshadow(): - from test_model import joinbits header = joinbits([0,2,0,1,0,0],[9,8,1,6,4,1]) - w_compiledmethod = model.W_CompiledMethod(3, header) + w_compiledmethod = model.W_CompiledMethod(space, 3, header) w_compiledmethod.setbytes(list("abc")) shadow = w_compiledmethod.as_compiledmethod_get_shadow(space) assert shadow.bytecode == "abc" @@ -232,9 +232,9 @@ def test_cached_methoddict(): # create a methoddict - foo = model.W_CompiledMethod(0) - bar = model.W_CompiledMethod(0) - baz = model.W_CompiledMethod(0) + foo = model.W_CompiledMethod(space, 0) + bar = model.W_CompiledMethod(space, 0) + baz = model.W_CompiledMethod(space, 0) methods = {'foo': foo, 'bar': bar} w_class = build_smalltalk_class("Demo", 0x90, methods=methods) @@ -259,13 +259,13 @@ def test_updating_class_changes_subclasses(): w_parent = build_smalltalk_class("Demo", 0x90, - methods={'bar': model.W_CompiledMethod(0)}) + 
methods={'bar': model.W_CompiledMethod(space, 0)}) w_class = build_smalltalk_class("Demo", 0x90, - methods={'foo': model.W_CompiledMethod(0)}, w_superclass=w_parent) + methods={'foo': model.W_CompiledMethod(space, 0)}, w_superclass=w_parent) s_class = w_class.as_class_get_shadow(space) version = s_class.version - w_method = model.W_CompiledMethod(0) + w_method = model.W_CompiledMethod(space, 0) key = space.wrap_string('foo') s_md = w_parent.as_class_get_shadow(space).s_methoddict() @@ -286,12 +286,11 @@ assert w_context.fetch(space, constants.CTXPART_PC_INDEX) is space.w_nil def test_methodcontext_s_home(): - from spyvm.wrapper import BlockClosureWrapper w_context = methodcontext() s_context = w_context.as_methodcontext_get_shadow(space) w_middle_context = methodcontext(w_sender=w_context) s_middle_context = w_middle_context.as_methodcontext_get_shadow(space) w_closure = space.newClosure(w_context, 3, 0, []) - s_closure_context = BlockClosureWrapper(space, w_closure).asContextWithSender(w_middle_context, []) + s_closure_context = wrapper.BlockClosureWrapper(space, w_closure).asContextWithSender(w_middle_context, []) assert s_closure_context.s_home() is s_context diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -1,11 +1,11 @@ -import py +import py, StringIO, sys +from struct import pack from spyvm import squeakimage from spyvm.squeakimage import chrs2int, chrs2long, swapped_chrs2long from spyvm import objspace -from .util import BootstrappedObjSpace -from struct import pack +from .util import create_space -space = BootstrappedObjSpace() +space = create_space() # ----- helpers ---------------------------------------------- @@ -20,7 +20,6 @@ return result def imagestream_mock(string): - import StringIO f = StringIO.StringIO(string) return squeakimage.Stream(f) @@ -184,7 +183,6 @@ assert r.stream.pos == len(image_2) def test_simple_image64(): - import sys if not sys.maxint == 2 ** 63 - 1: py.test.skip("on 32 bit platforms, we can't need to check for 64 bit images") word_size = 8 diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py deleted file mode 100644 --- a/spyvm/test/test_strategies.py +++ /dev/null @@ -1,190 +0,0 @@ -import py -from spyvm import wrapper, model, interpreter, strategies -from spyvm.model import w_nil -from spyvm.test import test_miniimage as tools -from spyvm.error import WrapperException, FatalError - -space, interp = tools.setup_module(tools, filename='bootstrapped.image') -class_Array = space.classtable["w_Array"] - -def arr(size): - return model.W_PointersObject(space, class_Array, size) - -def list_arr(size): - a = arr(size) - a.store(space, 0, arr(1)) - return a - -def int_arr(size): - a = arr(size) - a.store(space, 0, space.wrap_int(12)) - return a - -def float_arr(size): - a = arr(size) - a.store(space, 0, space.wrap_float(1.2)) - return a - -def check_arr(arr, expected): - for i in range(arr.size()): - w_val = arr.fetch(space, i) - if expected[i] == w_nil: - assert w_val == w_nil - elif isinstance(expected[i], int): - assert isinstance(w_val, model.W_SmallInteger) - assert space.unwrap_int(w_val) == expected[i] - elif isinstance(expected[i], float): - assert isinstance(w_val, model.W_Float) - assert space.unwrap_float(w_val) == expected[i] - else: - assert False, "Unexpected array of expected values." 
- -# ====== AllNil StorageStrategy - -def test_EmptyArray(): - a = arr(5) - assert isinstance(a.strategy, strategies.AllNilStorageStrategy) - -def test_StoreNil(): - a = arr(5) - a.store(space, 0, w_nil) - a.store(space, 4, w_nil) - assert isinstance(a.strategy, strategies.AllNilStorageStrategy) - -def test_FetchNil(): - a = arr(5) - assert a.fetch(space, 2) is w_nil - -def test_AllNilSize(): - a = arr(5) - assert a.size() == 5 - -# ====== List StorageStrategy - -def test_AllNil_to_List(): - a = list_arr(5) - assert isinstance(a.strategy, strategies.ListStorageStrategy) - -def test_List_store(): - a = list_arr(5) - a.store(space, 1, arr(1)) - a.store(space, 4, arr(1)) - assert isinstance(a.strategy, strategies.ListStorageStrategy) - -def test_List_fetch(): - a = list_arr(5) - assert a.fetch(space, 0).getclass(space) == class_Array - assert a.fetch(space, 4) == w_nil - -def test_List_size(): - a = list_arr(5) - a.store(space, 1, arr(1)) - assert a.size() == 5 - -# ====== SmallIntegerOrNil StorageStrategy - -def test_AllNil_to_Int(): - a = int_arr(5) - assert isinstance(a.strategy, strategies.SmallIntegerOrNilStorageStrategy) - check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) - -def test_SmallInt_store(): - a = int_arr(5) - a.store(space, 1, space.wrap_int(20)) - a.store(space, 2, space.wrap_int(20)) - assert isinstance(a.strategy, strategies.SmallIntegerOrNilStorageStrategy) - check_arr(a, [12, 20, 20, w_nil, w_nil]) - -def test_SmallInt_store_nil_to_nil(): - a = int_arr(5) - a.store(space, 1, w_nil) - check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) - -def test_SmallInt_overwrite(): - a = int_arr(5) - a.store(space, 1, space.wrap_int(1)) - a.store(space, 3, space.wrap_int(2)) - a.store(space, 0, space.wrap_int(100)) - a.store(space, 1, space.wrap_int(200)) - a.store(space, 3, space.wrap_int(300)) - check_arr(a, [100, 200, w_nil, 300, w_nil]) - -def test_SmallInt_delete(): - a = int_arr(5) - a.store(space, 1, space.wrap_int(1)) - a.store(space, 1, w_nil) - check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) - -def test_SmallInt_to_List(): - a = int_arr(5) - a.store(space, 1, arr(1)) - assert isinstance(a.strategy, strategies.ListStorageStrategy) - -def test_SmallInt_store_Float_to_List(): - a = int_arr(5) - a.store(space, 1, space.wrap_float(2.2)) - assert isinstance(a.strategy, strategies.ListStorageStrategy) - check_arr(a, [12, 2.2, w_nil, w_nil, w_nil]) - -# ====== FloatOrNil StorageStrategy - -def test_AllNil_to_Float(): - a = float_arr(5) - assert isinstance(a.strategy, strategies.FloatOrNilStorageStrategy) - check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) - -def test_Float_store(): - a = float_arr(5) - a.store(space, 1, space.wrap_float(20.0)) - a.store(space, 2, space.wrap_float(20.0)) - assert isinstance(a.strategy, strategies.FloatOrNilStorageStrategy) - check_arr(a, [1.2, 20.0, 20.0, w_nil, w_nil]) - -def test_Float_store_nil_to_nil(): - a = float_arr(5) - a.store(space, 1, w_nil) - check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) - -def test_Float_overwrite(): - a = float_arr(5) - a.store(space, 1, space.wrap_float(1.0)) - a.store(space, 3, space.wrap_float(2.0)) - a.store(space, 0, space.wrap_float(100.0)) - a.store(space, 1, space.wrap_float(200.0)) - a.store(space, 3, space.wrap_float(300.0)) - check_arr(a, [100.0, 200.0, w_nil, 300.0, w_nil]) - -def test_Float_delete(): - a = float_arr(5) - a.store(space, 1, space.wrap_float(1.0)) - a.store(space, 1, w_nil) - check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) - -def test_Float_to_List(): - a = float_arr(5) - a.store(space, 1, arr(1)) - 
assert isinstance(a.strategy, strategies.ListStorageStrategy) - -def test_Float_store_SmallInt_to_List(): - a = float_arr(5) - a.store(space, 1, space.wrap_int(2)) - assert isinstance(a.strategy, strategies.ListStorageStrategy) - check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) - -def test_statistics(): - stats = model.StrategyStatistics() - stats.stat_operation("B", "old", "new", 3) - stats.stat_operation("B", "old", "new", 4) - stats.stat_operation("B", "old2", "new2", 20) - stats.stat_operation("B", "old", "new", 5) - stats.stat_operation("A", "old", "new", 1) - stats.stat_operation("A", "old", "new", 2) - stats.stat_operation("C", "old", "new", 10) - stats.stat_operation("C", "old", "new", 11) - keys = stats.sorted_keys() - assert keys == [ ("A", "old", "new"), ("B", "old", "new"), ("B", "old2", "new2"), ("C", "old", "new") ] - assert stats.stats[keys[0]] == [1, 2] - assert stats.stats[keys[1]] == [3, 4, 5] - assert stats.stats[keys[2]] == [20] - assert stats.stats[keys[3]] == [10, 11] - \ No newline at end of file diff --git a/spyvm/test/test_wrapper.py b/spyvm/test/test_wrapper.py --- a/spyvm/test/test_wrapper.py +++ b/spyvm/test/test_wrapper.py @@ -1,13 +1,13 @@ import py from spyvm import wrapper, model, interpreter, objspace from spyvm.error import WrapperException, FatalError -from .util import BootstrappedObjSpace -from spyvm.test.test_interpreter import new_frame as new_frame_tuple +from .util import create_space +from spyvm.test.test_interpreter import _new_frame -space = BootstrappedObjSpace() +space = create_space() def new_frame(): - return new_frame_tuple("")[0] + return _new_frame(space, "")[0] def test_simpleread(): w_o = model.W_PointersObject(space, None, 2) diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -1,15 +1,10 @@ -import py -from spyvm import squeakimage, model, constants -from spyvm import interpreter, shadow, objspace -from spyvm.test import test_miniimage as tools -from spyvm.test.test_miniimage import w +from spyvm import squeakimage, model, constants, interpreter, shadow, objspace +from .util import read_image def setup(): - tools.setup_module(tools, filename='Squeak4.5-12568.image') - global space - global interp - space = tools.space - interp = tools.interp + import spyvm.test.test_zin_squeak_4_5_image as mod + mod.space, mod.interp, mod.image, mod.reader = read_image('Squeak4.5-12568.image') + mod.w = space.w def find_symbol_in_methoddict_of(string, s_class): s_methoddict = s_class.s_methoddict() @@ -20,11 +15,13 @@ return each def test_all_pointers_are_valid(): - tools.test_all_pointers_are_valid() - tools.test_lookup_abs_in_integer() + from test_miniimage import _test_all_pointers_are_valid + from test_miniimage import _test_lookup_abs_in_integer + _test_all_pointers_are_valid(reader) + _test_lookup_abs_in_integer(interp) def create_method_shadow(bytes, literals=[], islarge=0, argsize=0, tempsize=0): - w_method = model.W_CompiledMethod(len(bytes)) + w_method = model.W_CompiledMethod(space, len(bytes)) w_method.bytes = bytes w_method.islarge = islarge w_method.argsize = argsize diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -1,12 +1,57 @@ -from spyvm import model, shadow, objspace, version, constants +from spyvm import model, shadow, objspace, version, constants, squeakimage, interpreter from rpython.rlib.objectmodel import instantiate +def open_reader(space, 
imagefilename): + from spyvm.tool.analyseimage import image_dir + imagefilename = image_dir.join(imagefilename) + return squeakimage.reader_for_image(space, squeakimage.Stream(imagefilename.open(mode="rb"))) + +def read_image(image_filename): + space = create_space() + reader = open_reader(space, image_filename) + reader.initialize() + image = squeakimage.SqueakImage() + image.from_reader(space, reader) + interp = interpreter.Interpreter(space, image) + return space, interp, image, reader + +def create_space(): + return BootstrappedObjSpace() + +def create_space_interp(): + space = create_space() + interp = interpreter.Interpreter(space) + return space, interp + +def find_symbol_in_methoddict_of(string, s_class): + s_methoddict = s_class.s_methoddict() + s_methoddict.sync_method_cache() + methoddict_w = s_methoddict.methoddict + for each in methoddict_w.keys(): + if each.as_string() == string: + return each + class BootstrappedObjSpace(objspace.ObjSpace): + def w(self, any): + if any is None: return self.w_nil + if isinstance(any, model.W_Object): return any + if isinstance(any, str): + # assume never have strings of length 1 + if len(any) == 1: + return self.wrap_char(any) + else: + return self.wrap_string(any) + if isinstance(any, bool): return self.wrap_bool(any) + if isinstance(any, int): return self.wrap_int(any) + if isinstance(any, float): return self.wrap_float(any) + if isinstance(any, list): return self.wrap_list(any) + raise Exception("Cannot wrap %r" % any) + def make_bootstrap_classes(self): def define_core_cls(name, w_superclass, w_metaclass): assert name.startswith('w_') - w_class = bootstrap_class(self, instsize=0, # XXX + w_class = self.bootstrap_class(instsize=0, # XXX w_superclass=w_superclass, w_metaclass=w_metaclass, name=name[2:]) @@ -62,12 +107,12 @@ meta_super_nm = supercls_nm + "Class" w_Metaclass = self.classtable["w_Metaclass"] w_meta_cls = self.classtable[meta_nm] = \ - bootstrap_class(self, 0, # XXX + self.bootstrap_class(0, # XXX self.classtable[meta_super_nm], w_Metaclass, name=meta_nm[2:]) w_cls = self.classtable[cls_nm] = \ - bootstrap_class(self, instvarsize, + self.bootstrap_class(instvarsize, self.classtable[supercls_nm], w_meta_cls, format=format, @@ -119,58 +164,53 @@ w_cinst.store(self, constants.CHARACTER_VALUE_INDEX, model.W_SmallInteger(i)) return w_cinst - w_charactertable = model.W_PointersObject(self, - self.classtable['w_Array'], 256) - self.w_charactertable = w_charactertable + w_charactertable = model.W_PointersObject(self, self.classtable['w_Array'], 256) + self.add_bootstrap_object("w_charactertable", w_charactertable) for i in range(256): self.w_charactertable.atput0(self, i, bld_char(i)) - - # Very special nil hack: in order to allow W_PointersObject's to - # initialize their fields to nil, we have to create it in the model - # package, and then patch up its fields here: - def patch_nil(w_nil): - w_nil.space = self - w_nil.s_class = self.classtable['w_UndefinedObject'].as_class_get_shadow(self) - w_nil.initialize_storage(self, 0) - return w_nil - w_nil = self.w_nil = patch_nil(model.w_nil) - + # w_nil is already added to objtable in constructor. 
+ self.w_nil.w_class = self.classtable['w_UndefinedObject'] + self.w_nil.initialize_storage(self, 0) + w_true = self.classtable['w_True'].as_class_get_shadow(self).new() - self.w_true = w_true + self.add_bootstrap_object("w_true", w_true) w_false = self.classtable['w_False'].as_class_get_shadow(self).new() - self.w_false = w_false - self.w_minus_one = model.W_SmallInteger(-1) - self.w_zero = model.W_SmallInteger(0) - self.w_one = model.W_SmallInteger(1) - self.w_two = model.W_SmallInteger(2) + self.add_bootstrap_object("w_false", w_false) + + self.add_bootstrap_object("w_minus_one", model.W_SmallInteger(-1)) + self.add_bootstrap_object("w_zero", model.W_SmallInteger(0)) + self.add_bootstrap_object("w_one", model.W_SmallInteger(1)) + self.add_bootstrap_object("w_two", model.W_SmallInteger(2)) w_special_selectors = model.W_PointersObject(self, self.classtable['w_Array'], len(constants.SPECIAL_SELECTORS) * 2) - self.w_special_selectors = w_special_selectors - - self.objtable = {} + self.add_bootstrap_object("w_special_selectors", w_special_selectors) + for name in constants.objects_in_special_object_table: name = "w_" + name - try: - self.objtable[name] = locals()[name] From noreply at buildbot.pypy.org Wed Mar 26 07:40:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Mar 2014 07:40:40 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: fix: the JIT doesn't support for now 'with stm_ignored:' Message-ID: <20140326064040.7B36F1C015D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70294:d3957e5f25d8 Date: 2014-03-26 07:38 +0100 http://bitbucket.org/pypy/pypy/changeset/d3957e5f25d8/ Log: fix: the JIT doesn't support for now 'with stm_ignored:' diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -377,12 +377,12 @@ # special non-computed-yet value. if not s: return 0 - with stm_ignored: - x = s.hash + x = s.hash if x == 0: x = LLHelpers._ll_compute_strhash(s) return x + @jit.dont_look_inside def _ll_compute_strhash(s): x = _hash_string(s.chars) if x == 0: @@ -396,11 +396,10 @@ def ll_strfasthash(s): if rgc.stm_is_enabled(): - # due to "with stm_ignored" in _ll_strhash(), it is possible - # that just returning 's.hash' from here would rarely return - # the old value, which is 0. We need to check. - with stm_ignored: - x = s.hash + # due to "with stm_ignored" in _ll_compute_strhash(), it is + # possible that just returning 's.hash' from here would rarely + # return the old value, which is 0. We need to check. + x = s.hash if x == 0: x = LLHelpers._ll_compute_strhash(s) return x From noreply at buildbot.pypy.org Wed Mar 26 10:34:52 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Mar 2014 10:34:52 +0100 (CET) Subject: [pypy-commit] benchmarks default: remove some c4 hacks. may still be useful in c7 but shouldn't be in a benchmark Message-ID: <20140326093452.398591D23F5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r241:20ec94a7ed50 Date: 2014-03-26 10:34 +0100 http://bitbucket.org/pypy/benchmarks/changeset/20ec94a7ed50/ Log: remove some c4 hacks. 
may still be useful in c7 but shouldn't be in a benchmark diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -62,7 +62,7 @@ import multiprocessing -_thread_pool = ThreadPool(3 * multiprocessing.cpu_count()) +_thread_pool = ThreadPool(1.5 * multiprocessing.cpu_count()) diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -127,24 +127,15 @@ tasks = 0 def task(x, h, cameraPos, objs, lightSource): - # force a transaction break here (STM not yet smart enough - # to figure out that it should break here) - time.sleep(0) - with atomic: for y in range(h): ray = Ray(cameraPos, (Vector(x/50.0-5,y/50.0-5,0)-cameraPos).normal()) trace(ray, objs, lightSource, 10) - # force a transaction break. updating a global var should - # be done in a separate transaction: - time.sleep(0) - global tasks with atomic: tasks -= 1 - time.sleep(0) futures = [] def future_dispatcher(ths, *args): @@ -157,7 +148,6 @@ tasks += 1 futures.append(Future(task, *args)) - time.sleep(0) From noreply at buildbot.pypy.org Wed Mar 26 13:19:21 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 26 Mar 2014 13:19:21 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Added setup_module and teardown_module to every test module. Message-ID: <20140326121921.C1BC31D2996@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r698:83ba26abe561 Date: 2014-03-26 13:19 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/83ba26abe561/ Log: Added setup_module and teardown_module to every test module. All module-wide resources are cleaned up after the tests; added some util functions to make this more concise. Following this pattern makes the tests more safe and guards against mixing up space/interp instances in unintended ways. Also, this frees a lot of memory after test modules that load big images etc. Unfortunately, default arguments to test_* methods cannot be used this way, had to be replaced with None guards. Still, way cleaner this way.
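The copy_to_module() and cleanup_module() helpers that the diff below relies on live in spyvm/test/util.py, whose updated version is not included in this excerpt. The following is only a rough sketch of what such helpers could look like, assuming they do nothing more than mirror the locals of setup_module() into the test module's namespace and delete them again on teardown; the names _copied_names and the exact behaviour are illustrative, not taken from the actual commit.

import sys

def copy_to_module(locals_dict, module_name):
    # Publish the given locals (space, interp, w, perform, ...) as globals of
    # the test module and remember their names for later cleanup.
    module = sys.modules[module_name]
    module._copied_names = list(locals_dict.keys())
    for name, value in locals_dict.items():
        setattr(module, name, value)

def cleanup_module(module_name):
    # Delete everything that copy_to_module() published, so the object space,
    # interpreter and any loaded image can be freed once the module's tests ran.
    module = sys.modules[module_name]
    for name in getattr(module, '_copied_names', []):
        if hasattr(module, name):
            delattr(module, name)
    if hasattr(module, '_copied_names'):
        del module._copied_names

Usage then follows the pattern visible throughout the diff below: setup_module() builds its fixtures as locals and ends with copy_to_module(locals(), __name__), while teardown_module() simply calls cleanup_module(__name__).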
diff --git a/spyvm/test/test_bitblt.py b/spyvm/test/test_bitblt.py --- a/spyvm/test/test_bitblt.py +++ b/spyvm/test/test_bitblt.py @@ -1,9 +1,14 @@ from spyvm import model, shadow, constants, interpreter, objspace from spyvm.plugins import bitblt -from .util import create_space +from .util import create_space, copy_to_module, cleanup_module -space = create_space() -w = space.w +def setup_module(): + space = create_space() + w = space.w + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) def make_form(bits, width, height, depth, o_x=0, o_y=0): w_f = model.W_PointersObject(space, space.w_Array, 5) diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -1,14 +1,17 @@ import py from spyvm import squeakimage, model, constants from spyvm import interpreter, shadow -from .util import read_image +from .util import read_image, copy_to_module, cleanup_module -def setup(): - import spyvm.test.test_bootstrappedimage as mod - mod.space, mod.interp, mod.image, mod.reader = read_image("bootstrapped.image") - mod.w = space.w - mod.perform = interp.perform - mod.space.initialize_class(mod.space.w_String, mod.interp) +def setup_module(): + space, interp, image, reader = read_image("bootstrapped.image") + w = space.w + perform = interp.perform + copy_to_module(locals(), __name__) + space.initialize_class(space.w_String, interp) + +def teardown_module(): + cleanup_module(__name__) def test_symbol_asSymbol(): w_result = perform(image.w_asSymbol, "asSymbol") diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,10 +1,15 @@ import py, operator, sys from spyvm import model, interpreter, primitives, shadow, objspace, wrapper, constants -from .util import create_space_interp +from .util import create_space_interp, copy_to_module, cleanup_module from spyvm.wrapper import PointWrapper from spyvm.conftest import option -space, interp = create_space_interp() +def setup_module(): + space, interp = create_space_interp() + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) def bootstrap_class(instsize, w_superclass=None, w_metaclass=None, name='?', format=shadow.POINTERS, varsized=True): @@ -101,7 +106,9 @@ s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, receiver, ["foo", "bar"]) return s_frame.w_self(), s_frame -def new_frame(bytes, receiver=space.w_nil, space=space): +def new_frame(bytes, receiver=None): + if not receiver: + receiver = space.w_nil return _new_frame(space, bytes, receiver) def test_create_frame(): @@ -697,7 +704,9 @@ storeAssociation(doubleExtendedDoAnythingBytecode + chr(7<<5) + chr(0)) -def interpret_bc(bcodes, literals, receiver=space.w_nil): +def interpret_bc(bcodes, literals, receiver=None): + if not receiver: + receiver = space.w_nil bcode = "".join([chr(x) for x in bcodes]) w_frame, s_frame = new_frame(bcode, receiver=receiver) s_frame.w_method().setliterals(literals) diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -1,14 +1,19 @@ import py, operator from spyvm import squeakimage, model, constants, error, interpreter, shadow, primitives from spyvm.test.test_primitives import MockFrame -from .util import read_image, find_symbol_in_methoddict_of +from .util import 
read_image, find_symbol_in_methoddict_of, copy_to_module, cleanup_module from rpython.rlib.rarithmetic import intmask, r_uint -space, interp, _, _ = read_image('bootstrapped.image') -w = space.w -perform = interp.perform -interp.trace = False -space.initialize_class(space.w_String, interp) +def setup_module(): + space, interp, _, _ = read_image('bootstrapped.image') + w = space.w + perform = interp.perform + copy_to_module(locals(), __name__) + interp.trace = False + space.initialize_class(space.w_String, interp) + +def teardown_module(): + cleanup_module(__name__) def perform_primitive(rcvr, w_selector, *args): code = rcvr.class_shadow(space).lookup(w_selector).primitive() diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,10 +1,15 @@ import py, math from spyvm import squeakimage, model, constants, interpreter, shadow, objspace, wrapper, primitives -from .util import read_image, open_reader +from .util import read_image, open_reader, copy_to_module, cleanup_module -space, interp, image, reader = read_image("mini.image") -w = space.w -perform = interp.perform +def setup_module(): + space, interp, image, reader = read_image("mini.image") + w = space.w + perform = interp.perform + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) def open_miniimage(): return open_reader(space, "mini.image") diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -3,12 +3,17 @@ from spyvm.shadow import MethodNotFound, WEAK_POINTERS from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi -from .util import create_space +from .util import create_space, copy_to_module, cleanup_module -space = create_space() -bootstrap_class = space.bootstrap_class -w_foo = space.wrap_string("foo") -w_bar = space.wrap_string("bar") +def setup_module(): + space = create_space() + bootstrap_class = space.bootstrap_class + w_foo = space.wrap_string("foo") + w_bar = space.wrap_string("bar") + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) def joinbits(values, lengths): result = 0 @@ -171,13 +176,19 @@ with py.test.raises(error.PrimitiveFailedError): w_method.atput0(space, 9, space.wrap_int(5)) -def test_is_same_object(w_o1=model.W_PointersObject(space, None,0), w_o2=None): +def test_is_same_object(w_o1=None, w_o2=None): + if w_o1 is None: + w_o1 = model.W_PointersObject(space, None, 0) if w_o2 is None: w_o2 = w_o1 assert w_o1.is_same_object(w_o2) assert w_o2.is_same_object(w_o1) -def test_not_is_same_object(w_o1=model.W_PointersObject(space, None,0),w_o2=model.W_PointersObject(space, None,0)): +def test_not_is_same_object(w_o1=None,w_o2=None): + if w_o1 is None: + w_o1 = model.W_PointersObject(space, None, 0) + if w_o2 is None: + w_o2 = model.W_PointersObject(space, None,0) assert not w_o1.is_same_object(w_o2) assert not w_o2.is_same_object(w_o1) w_o2 = model.W_SmallInteger(2) diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -1,9 +1,14 @@ import py, sys from spyvm import objspace, model from rpython.rlib.rarithmetic import r_uint -from .util import create_space +from .util import create_space, copy_to_module, cleanup_module -space = create_space() +def setup_module(): + space = create_space() + copy_to_module(locals(), __name__) + +def 
teardown_module(): + cleanup_module(__name__) def ismetaclass(w_cls): # Heuristic to detect if this is a metaclass. Don't use apart diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -5,12 +5,17 @@ from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype, rffi -from .util import create_space +from .util import create_space, copy_to_module, cleanup_module from .test_interpreter import _new_frame -space = create_space() -wrap = space.w -bootstrap_class = space.bootstrap_class +def setup_module(): + space = create_space() + wrap = space.w + bootstrap_class = space.bootstrap_class + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) def new_frame(bytes): return _new_frame(space, bytes, space.w_nil) diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -1,14 +1,18 @@ import random from spyvm import model, shadow, constants, interpreter, objspace, wrapper -from .util import create_space +from .util import create_space, copy_to_module, cleanup_module from test_model import joinbits -space = create_space() +def setup_module(): + space = create_space() + w_Object = space.classtable['w_Object'] + w_Metaclass = space.classtable['w_Metaclass'] + w_MethodDict = space.classtable['w_MethodDict'] + w_Array = space.classtable['w_Array'] + copy_to_module(locals(), __name__) -w_Object = space.classtable['w_Object'] -w_Metaclass = space.classtable['w_Metaclass'] -w_MethodDict = space.classtable['w_MethodDict'] -w_Array = space.classtable['w_Array'] +def teardown_module(): + cleanup_module(__name__) def build_methoddict(methods): size = int(len(methods) * 1.5) @@ -29,8 +33,10 @@ w_array.store(space, pos, w_compiledmethod) return w_methoddict -def build_smalltalk_class(name, format, w_superclass=w_Object, +def build_smalltalk_class(name, format, w_superclass=None, w_classofclass=None, methods={}): + if w_superclass is None: + w_superclass = w_Object if w_classofclass is None: w_classofclass = build_smalltalk_class(None, 0x94, w_superclass.getclass(space), @@ -76,7 +82,7 @@ for w_key, value in methoddict.items(): assert methods[w_key.as_string()].as_compiledmethod_get_shadow(space) is value -def method(tempsize=3,argsize=2, bytes="abcde"): +def create_method(tempsize=3,argsize=2, bytes="abcde"): w_m = model.W_CompiledMethod(space, ) w_m.bytes = bytes w_m.tempsize = tempsize @@ -84,8 +90,12 @@ w_m.literalsize = 2 return w_m -def methodcontext(w_sender=space.w_nil, pc=13, stackpointer=0, stacksize=5, - method=method()): +def methodcontext(w_sender=None, pc=13, stackpointer=0, stacksize=5, + method=None): + if w_sender is None: + w_sender = space.w_nil + if method is None: + method = create_method() w_object = model.W_PointersObject(space, space.w_MethodContext, constants.MTHDCTX_TEMP_FRAME_START+method.tempsize+stacksize) w_object.store(space, constants.CTXPART_SENDER_INDEX, w_sender) w_object.store(space, constants.CTXPART_PC_INDEX, space.wrap_int(pc)) @@ -98,8 +108,12 @@ w_object.store(space, constants.MTHDCTX_TEMP_FRAME_START, space.wrap_string('el')) return w_object -def blockcontext(w_sender=space.w_nil, pc=13, stackpointer=1, stacksize=5, - home=methodcontext()): +def blockcontext(w_sender=None, pc=13, stackpointer=1, stacksize=5, + home=None): + if w_sender is None: + w_sender = space.w_nil + if home 
is None: + home = methodcontext() w_object = model.W_PointersObject(space, space.w_MethodContext, constants.MTHDCTX_TEMP_FRAME_START+stacksize) w_object.store(space, constants.CTXPART_SENDER_INDEX, w_sender) w_object.store(space, constants.CTXPART_PC_INDEX, space.wrap_int(pc)) @@ -111,7 +125,7 @@ return w_object def test_context(): - w_m = method() + w_m = create_method() w_object = methodcontext(stackpointer=3, method=w_m) w_object2 = methodcontext(w_sender=w_object) s_object = w_object.as_methodcontext_get_shadow(space) @@ -144,7 +158,7 @@ assert s_object.stackdepth() == s_object.tempsize() def test_methodcontext(): - w_m = method() + w_m = create_method() # Point over 2 literals of size 4 w_object = methodcontext(pc=13,method=w_m) s_object = w_object.as_methodcontext_get_shadow(space) @@ -160,7 +174,7 @@ assert space.w_nil == w_obj.fetch(space, i) def test_attach_mc(): - w_m = method() + w_m = create_method() w_object = methodcontext(pc=13, method=w_m) s_object = w_object.as_methodcontext_get_shadow(space) assert s_object.fetch(1).value == 13 diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -3,9 +3,14 @@ from spyvm import squeakimage from spyvm.squeakimage import chrs2int, chrs2long, swapped_chrs2long from spyvm import objspace -from .util import create_space +from .util import create_space, copy_to_module, cleanup_module -space = create_space() +def setup_module(): + space = create_space() + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) # ----- helpers ---------------------------------------------- @@ -27,7 +32,6 @@ stream = imagestream_mock(string) return squeakimage.reader_for_image(space, stream) - SIMPLE_VERSION_HEADER = pack(">i", 6502) SIMPLE_VERSION_HEADER_LE = pack(" Author: Remi Meier Branch: stmgc-c7 Changeset: r70295:4fc00f232e44 Date: 2014-03-26 14:35 +0100 http://bitbucket.org/pypy/pypy/changeset/4fc00f232e44/ Log: probably fixes something (call_assembler didn't assume next op could be in a new transaction & it looks like we didn't save roots around some llops) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -96,6 +96,12 @@ assert not op.is_call() and not op.is_malloc() self.fallback_inevitable(op) + def handle_call_assembler(self, op): + # required, because this op is only handled in super class + # and we didn't call this line yet: + self.next_op_may_be_in_new_transaction() + GcRewriterAssembler.handle_call_assembler(self, op) + def next_op_may_be_in_new_transaction(self): self.always_inevitable = False self.read_barrier_applied.clear() diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -113,17 +113,18 @@ def gct_gc_can_move(self, hop): hop.rename('stm_can_move') -## def _gct_with_roots_pushed(self, hop): -## livevars = self.push_roots(hop) -## self.default(hop) -## self.pop_roots(hop, livevars) + def _gct_with_roots_pushed(self, hop): + livevars = self.push_roots(hop) + self.default(hop) + self.pop_roots(hop, livevars) -## # sync with lloperation.py -## gct_stm_become_inevitable = _gct_with_roots_pushed -## gct_stm_partial_commit_and_resume_other_threads = _gct_with_roots_pushed -## gct_stm_perform_transaction = 
_gct_with_roots_pushed -## gct_stm_inspect_abort_info = _gct_with_roots_pushed -## gct_stm_threadlocalref_set = _gct_with_roots_pushed + # sync with lloperation.py + gct_stm_become_inevitable = _gct_with_roots_pushed + + gct_stm_become_globally_unique_transaction = _gct_with_roots_pushed + gct_stm_perform_transaction = _gct_with_roots_pushed + gct_stm_inspect_abort_info = _gct_with_roots_pushed + gct_stm_threadlocalref_set = _gct_with_roots_pushed class StmRootWalker(BaseRootWalker): diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -16,7 +16,7 @@ 'jit_force_quasi_immutable', 'jit_marker', 'jit_is_virtual', 'jit_record_known_class', 'gc_identityhash', 'gc_id', 'gc_can_move', 'gc__collect', - 'gc_adr_of_root_stack_top', + 'gc_adr_of_root_stack_top', 'gc_add_memory_pressure', 'weakref_create', 'weakref_deref', 'stm_threadlocalref_get', 'stm_threadlocalref_set', 'stm_threadlocalref_count', 'stm_threadlocalref_addr', @@ -68,7 +68,7 @@ return funcptr._name + '()' except AttributeError: return True - + elif op.opname == 'indirect_call': tographs = op.args[-1].value if tographs is not None: @@ -76,10 +76,10 @@ return False # unknown function return True - + assert False - - + + def should_turn_inevitable(op, block, fresh_mallocs): # Always-allowed operations never cause a 'turn inevitable' if op.opname in ALWAYS_ALLOW_OPERATIONS: From noreply at buildbot.pypy.org Wed Mar 26 14:47:11 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Mar 2014 14:47:11 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: add a stmrewrite test that was fixed by the previous commit Message-ID: <20140326134711.2E5FE1D29A0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70296:8732882f7eac Date: 2014-03-26 14:46 +0100 http://bitbucket.org/pypy/pypy/changeset/8732882f7eac/ Log: add a stmrewrite test that was fixed by the previous commit diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -81,10 +81,10 @@ oopspecindex = 0 def call_needs_inevitable(self): return inev - - calldescr = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U), + + calldescr = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U), fakeextrainfo()) - + self.check_rewrite(""" [] call(123, descr=cd) @@ -315,7 +315,7 @@ jump() """, """ [i1, i2] - + jump() """) @@ -410,7 +410,7 @@ i2 = int_add(i1, 1) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i2, descr=tydescr) - + jump(p1) """) @@ -453,7 +453,7 @@ i3 = getfield_raw(i1, descr=tydescr) keepalive(i3) i4 = getfield_raw(i2, descr=tydescr) - + jump(i3, i4) """) @@ -469,7 +469,7 @@ """, """ [i1] i2 = getfield_raw(i1, descr=fdescr) - + jump(i2) """, fdescr=fdescr) @@ -487,7 +487,7 @@ label(i1, i2, i3) $INEV i4 = getfield_raw(i2, descr=tydescr) - + jump(i3, i4) """) @@ -502,7 +502,7 @@ $INEV i3 = getarrayitem_raw(i1, 5, descr=adescr) i4 = getarrayitem_raw(i2, i3, descr=adescr) - + jump(i3, i4) """) @@ -518,7 +518,7 @@ setarrayitem_gc(p1, i1, p2, descr=adescr) cond_call_gc_wb(p3, descr=wbdescr) setarrayitem_gc(p3, i3, p4, descr=adescr) - + jump() """) @@ -535,7 +535,7 @@ setarrayitem_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setarrayitem_gc(p1, i3, p3, descr=adescr) - + jump() """) @@ -552,7 +552,7 @@ setinteriorfield_gc(p1, i2, p2, 
descr=intzdescr) i4 = read_timestamp() setinteriorfield_gc(p1, i3, p3, descr=intzdescr) - + jump() """) @@ -567,7 +567,7 @@ cond_call_gc_wb(p1, descr=wbdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) - + jump() """) @@ -616,10 +616,10 @@ setfield_gc(p7, 10, descr=tydescr) call_release_gil(123, descr=calldescr2) guard_not_forced() [] - + cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 20, descr=tydescr) - + jump(i2, p7) """, calldescr2=calldescr2) @@ -647,7 +647,7 @@ %s cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 20, descr=tydescr) - + jump(i2, p7) """ % op, calldescr2=calldescr2) @@ -695,7 +695,7 @@ setfield_gc(p1, 10, descr=tydescr) %s setfield_gc(p1, 20, descr=tydescr) - + jump(p1) """ % op) @@ -730,7 +730,7 @@ %s cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, 20, descr=tydescr) - + jump(p1) """ % (op, guard, tr_break), calldescr2=calldescr2) @@ -738,7 +738,7 @@ self.check_rewrite(""" [i0, f0] i2 = call_assembler(i0, f0, descr=casmdescr) - guard_not_forced()[] + guard_not_forced()[] """, """ [i0, f0] i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth) @@ -750,7 +750,41 @@ setarrayitem_gc(p1, 1, f0, descr=floatframedescr) i3 = call_assembler(p1, descr=casmdescr) guard_not_forced() [] - + + """) + + def test_repeat_barrier_after_call_assembler(self): + self.check_rewrite(""" + [i0, f0, p1] + p2 = getfield_gc(p1, descr=tzdescr) + setfield_gc(p1, p2, descr=tzdescr) + + i2 = call_assembler(i0, f0, descr=casmdescr) + guard_not_forced()[] + + p3 = getfield_gc(p1, descr=tzdescr) + setfield_gc(p1, p3, descr=tzdescr) + """, """ + [i0, f0, p1] + p2 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, p2, descr=tzdescr) + + i1 = getfield_raw(ConstClass(frame_info), descr=jfi_frame_depth) + p5 = call_malloc_nursery_varsize_frame(i1) + setfield_gc(p5, 0, descr=tiddescr) + setfield_gc(p5, i1, descr=framelendescr) + setfield_gc(p5, ConstClass(frame_info), descr=jf_frame_info) + setarrayitem_gc(p5, 0, i0, descr=signedframedescr) + setarrayitem_gc(p5, 1, f0, descr=floatframedescr) + i3 = call_assembler(p5, descr=casmdescr) + guard_not_forced() [] + + p3 = getfield_gc(p1, descr=tzdescr) + stm_read(p1) + cond_call_gc_wb(p1, descr=wbdescr) + setfield_gc(p1, p3, descr=tzdescr) """) def test_ptr_eq_null(self): @@ -761,7 +795,7 @@ """, """ [p1, p2] i1 = ptr_eq(p1, NULL) - + jump(i1) """) @@ -773,7 +807,7 @@ """, """ [p1, p2] i1 = ptr_eq(p1, p2) - + jump(i1) """) @@ -785,7 +819,7 @@ """, """ [p1, p2] i1 = instance_ptr_eq(p1, p2) - + jump(i1) """) @@ -797,7 +831,7 @@ """, """ [p1, p2] i1 = ptr_ne(p1, p2) - + jump(i1) """) @@ -809,7 +843,7 @@ """, """ [p1, p2] i1 = instance_ptr_ne(p1, p2) - + jump(i1) """) @@ -926,7 +960,7 @@ [i0] p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr) setfield_gc(p0, i0, descr=blendescr) - + jump(i0) """) @@ -939,7 +973,7 @@ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) setfield_gc(p0, i0, descr=strlendescr) - + jump(i0) """) @@ -963,7 +997,7 @@ %(nonstd_descr.lendescr.offset)d, \ 6464, i0, \ descr=malloc_array_nonstandard_descr) - + jump(i0) """, nonstd_descr=nonstd_descr) @@ -978,7 +1012,7 @@ p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ %(bdescr.tid)d, 103, \ descr=malloc_array_descr) - + jump() """) @@ -1015,7 +1049,7 @@ p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ %(bdescr.tid)d, 20000000, \ descr=malloc_array_descr) - + jump() """) @@ -1110,7 +1144,7 @@ setfield_gc(p1, 5, descr=blendescr) stm_transaction_break(1) - + p2 = call_malloc_nursery( \ 
%(bdescr.basesize + 8)d) setfield_gc(p2, 8765, descr=tiddescr) From noreply at buildbot.pypy.org Wed Mar 26 15:55:17 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 26 Mar 2014 15:55:17 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Removed obsolete method. Message-ID: <20140326145517.43DDF1C066C@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r699:12f5258a6002 Date: 2014-03-26 13:20 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/12f5258a6002/ Log: Removed obsolete method. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -166,9 +166,6 @@ def invariant(self): return isinstance(self.value, int) and self.value < 0x8000 - def make_copy(self, space): - return space.wrap_int(space.unwrap_int(self)) - def lshift(self, space, shift): from rpython.rlib.rarithmetic import ovfcheck, intmask, r_uint # shift > 0, therefore the highest bit of upperbound is not set, From noreply at buildbot.pypy.org Wed Mar 26 15:55:18 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 26 Mar 2014 15:55:18 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Removed hack for loading mini.image, replaced with clean distinguishing between modern and non-modern images. Message-ID: <20140326145518.668E21C066C@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r700:fac293b61449 Date: 2014-03-26 13:58 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/fac293b61449/ Log: Removed hack for loading mini.image, replaced with clean distinguishing between modern and non-modern images. diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -130,15 +130,6 @@ self._s_methoddict = w_val.as_methoddict_get_shadow(self.space) self._s_methoddict.s_class = self elif n0 == constants.CLASS_FORMAT_INDEX: - if not isinstance(w_val, model.W_SmallInteger): - # TODO -- anton -- this happens with mini.image and other images (but not Squeak*.image) - # You can try something like the following after all g_objects have been initialized in the ImageReader: - # special_objects[11] == special_objects[36].g_class - # TODO -- fix images or think of a more appropriate hack. - self._instance_size = constants.BLKCLSR_SIZE - self.instance_varsized = True - return - # read and painfully decode the format classformat = self.space.unwrap_int(w_val) # The classformat in Squeak, as an integer value, is: diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -213,6 +213,7 @@ self.space = space self.stream = stream self.version = version + self.is_modern = self.version.magic > 6502 # dictionary mapping old address to chunk object self.chunks = {} self.chunklist = [] @@ -280,7 +281,15 @@ def assign_prebuilt_constants(self): # Assign classes and objects that in special objects array that are already created. self._assign_prebuilt_constants(constants.objects_in_special_object_table, self.space.objtable) - self._assign_prebuilt_constants(constants.classes_in_special_object_table, self.space.classtable) + if not self.is_modern: + classtable = {} + for name, so_index in self.space.classtable.items(): + # In non-modern images (pre 4.0), there was no BlockClosure class. 
+ if not name == "BlockClosure": + classtable[name] = so_index + else: + classtable = self.space.classtable + self._assign_prebuilt_constants(constants.classes_in_special_object_table, classtable) def _assign_prebuilt_constants(self, names_and_indices, prebuilt_objects): for name, so_index in names_and_indices.items(): @@ -369,7 +378,7 @@ self.w_simulateCopyBits = self.find_symbol(space, reader, "simulateCopyBits") self.lastWindowSize = reader.lastWindowSize self.version = reader.version - self.is_modern = reader.version.magic > 6502 + self.is_modern = reader.is_modern self.run_spy_hacks(space) self.startup_time = time.time() From noreply at buildbot.pypy.org Wed Mar 26 15:55:19 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 26 Mar 2014 15:55:19 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Removed empty file. Message-ID: <20140326145519.7DCB61C066C@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r701:27ce1bfd3196 Date: 2014-03-26 14:30 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/27ce1bfd3196/ Log: Removed empty file. diff --git a/a b/a deleted file mode 100644 From noreply at buildbot.pypy.org Wed Mar 26 15:55:20 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 26 Mar 2014 15:55:20 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Cleaned up bootstrapping of ObjSpace and especially BootstrappedObjSpace for tests. Message-ID: <20140326145520.AD1361C066C@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r702:5d8e0a95e3f9 Date: 2014-03-26 15:41 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/5d8e0a95e3f9/ Log: Cleaned up bootstrapping of ObjSpace and especially BootstrappedObjSpace for tests. Tests now selectively bootstrap the ObjSpace only if they require the core classes. This way, many tests work with the real, non-bootstrapped, ObjSpace. 
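Together with the helpers from changeset r698, the hunks below boil down to the following usage pattern for a test module that needs the core class hierarchy. This is an illustrative sketch rather than part of the changeset; the final assertion relies only on what patch_bootstrap_objects() in the diff below sets up:

from .util import create_space, copy_to_module, cleanup_module

def setup_module():
    # bootstrap=True builds the core class hierarchy and patches w_nil, w_true,
    # w_false etc.; the default keeps the plain, not-bootstrapped ObjSpace.
    space = create_space(bootstrap=True)
    copy_to_module(locals(), __name__)

def teardown_module():
    cleanup_module(__name__)

def test_nil_has_a_class_after_bootstrap():
    # Only meaningful with a bootstrapped space: patch_bootstrap_objects()
    # assigns w_UndefinedObject as the class of w_nil.
    assert space.w_nil.w_class is space.w_UndefinedObject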
diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -41,17 +41,25 @@ assert i > 0 self._executable_path[0] = fullpath[:i] + def populate_special_objects(self, specials): + for name, idx in constants.objects_in_special_object_table.items(): + name = "w_" + name + if not name in self.objtable or not self.objtable[name]: + self.add_bootstrap_object(name, specials[idx]) + def executable_path(self): return self._executable_path[0] - + + def add_bootstrap_class(self, name, cls): + self.classtable[name] = cls + setattr(self, name, cls) + def make_bootstrap_classes(self): names = [ "w_" + name for name in constants.classes_in_special_object_table.keys() ] for name in names: cls = model.W_PointersObject(self, None, 0) - self.classtable[name] = cls - setattr(self, name, cls) - # Make sure that all prebuilt classes are actually used in the special classes array - + self.add_bootstrap_class(name, cls) + def add_bootstrap_object(self, name, obj): self.objtable[name] = obj setattr(self, name, obj) @@ -60,15 +68,7 @@ obj = model.W_PointersObject(self, None, 0) self.add_bootstrap_object(name, obj) - def make_bootstrap_objects(self): - self.make_bootstrap_object("w_true") - self.make_bootstrap_object("w_false") - self.make_bootstrap_object("w_special_selectors") - self.add_bootstrap_object("w_minus_one", model.W_SmallInteger(-1)) - self.add_bootstrap_object("w_zero", model.W_SmallInteger(0)) - self.add_bootstrap_object("w_one", model.W_SmallInteger(1)) - self.add_bootstrap_object("w_two", model.W_SmallInteger(2)) - + def make_character_table(self): def build_char(i): # TODO - This is pretty hacky, maybe not required? At least eliminate the constant 1. w_cinst = model.W_PointersObject(self, self.w_Character, 1) @@ -79,12 +79,24 @@ for i in range(256): char_table.store(self, i, build_char(i)) self.add_bootstrap_object("w_charactertable", char_table) + + def make_bootstrap_objects(self): + self.make_character_table() + self.make_bootstrap_object("w_true") + self.make_bootstrap_object("w_false") + self.make_bootstrap_object("w_special_selectors") + self.add_bootstrap_object("w_minus_one", model.W_SmallInteger(-1)) + self.add_bootstrap_object("w_zero", model.W_SmallInteger(0)) + self.add_bootstrap_object("w_one", model.W_SmallInteger(1)) + self.add_bootstrap_object("w_two", model.W_SmallInteger(2)) + # Certain special objects are already created. The rest will be + # populated when the image is loaded, but prepare empty slots for them. 
for name in constants.objects_in_special_object_table: name = "w_" + name if not name in self.objtable: self.add_bootstrap_object(name, None) - + @specialize.arg(1) def get_special_selector(self, selector): i0 = constants.find_selectorindex(selector) diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -370,10 +370,7 @@ self.special_objects = [g_object.w_object for g_object in reader.chunks[reader.specialobjectspointer] .g_object.pointers] - - for name, idx in constants.objects_in_special_object_table.items(): - space.objtable["w_" + name] = self.special_objects[idx] - + space.populate_special_objects(self.special_objects) self.w_asSymbol = self.find_symbol(space, reader, "asSymbol") self.w_simulateCopyBits = self.find_symbol(space, reader, "simulateCopyBits") self.lastWindowSize = reader.lastWindowSize diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -5,7 +5,7 @@ from spyvm.conftest import option def setup_module(): - space, interp = create_space_interp() + space, interp = create_space_interp(bootstrap = True) copy_to_module(locals(), __name__) def teardown_module(): diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -6,7 +6,7 @@ from .util import create_space, copy_to_module, cleanup_module def setup_module(): - space = create_space() + space = create_space(bootstrap = True) bootstrap_class = space.bootstrap_class w_foo = space.wrap_string("foo") w_bar = space.wrap_string("bar") diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -4,7 +4,7 @@ from .util import create_space, copy_to_module, cleanup_module def setup_module(): - space = create_space() + space = create_space(bootstrap = True) copy_to_module(locals(), __name__) def teardown_module(): diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -9,7 +9,7 @@ from .test_interpreter import _new_frame def setup_module(): - space = create_space() + space = create_space(bootstrap = True) wrap = space.w bootstrap_class = space.bootstrap_class copy_to_module(locals(), __name__) diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -4,7 +4,7 @@ from test_model import joinbits def setup_module(): - space = create_space() + space = create_space(bootstrap = True) w_Object = space.classtable['w_Object'] w_Metaclass = space.classtable['w_Metaclass'] w_MethodDict = space.classtable['w_MethodDict'] diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -2,13 +2,17 @@ from spyvm import model, shadow, objspace, version, constants, squeakimage, interpreter from rpython.rlib.objectmodel import instantiate +# Most tests don't need a bootstrapped objspace. Those that do, indicate so explicitely. +# This way, as many tests as possible use the real, not-bootstrapped ObjSpace. 
+bootstrap_by_default = False + def open_reader(space, imagefilename): from spyvm.tool.analyseimage import image_dir imagefilename = image_dir.join(imagefilename) return squeakimage.reader_for_image(space, squeakimage.Stream(imagefilename.open(mode="rb"))) -def read_image(image_filename): - space = create_space() +def read_image(image_filename, bootstrap = bootstrap_by_default): + space = create_space(bootstrap) reader = open_reader(space, image_filename) reader.initialize() image = squeakimage.SqueakImage() @@ -16,11 +20,14 @@ interp = interpreter.Interpreter(space, image) return space, interp, image, reader -def create_space(): - return BootstrappedObjSpace() +def create_space(bootstrap = bootstrap_by_default): + space = BootstrappedObjSpace() + if bootstrap: + space.bootstrap() + return space -def create_space_interp(): - space = create_space() +def create_space_interp(bootstrap = bootstrap_by_default): + space = create_space(bootstrap) interp = interpreter.Interpreter(space) return space, interp @@ -49,29 +56,20 @@ class BootstrappedObjSpace(objspace.ObjSpace): - def w(self, any): - if any is None: return self.w_nil - if isinstance(any, model.W_Object): return any - if isinstance(any, str): - # assume never have strings of length 1 - if len(any) == 1: - return self.wrap_char(any) - else: - return self.wrap_string(any) - if isinstance(any, bool): return self.wrap_bool(any) - if isinstance(any, int): return self.wrap_int(any) - if isinstance(any, float): return self.wrap_float(any) - if isinstance(any, list): return self.wrap_list(any) - raise Exception("Cannot wrap %r" % any) + def bootstrap(self): + # Fill this ObjSpace up with class complete core hierarchies and patch core objects. + self.create_core_classes() + self.patch_bootstrap_classes() + self.patch_bootstrap_objects() - def make_bootstrap_classes(self): + def create_core_classes(self): def define_core_cls(name, w_superclass, w_metaclass): assert name.startswith('w_') w_class = self.bootstrap_class(instsize=0, # XXX w_superclass=w_superclass, w_metaclass=w_metaclass, name=name[2:]) - self.classtable[name] = w_class + self.add_bootstrap_class(name, w_class) return w_class # A complete minimal setup (including Behavior) would look like this @@ -105,111 +103,95 @@ meta_super_nm = super_cls_nm + "Class" w_metacls = define_core_cls(meta_nm, self.classtable[meta_super_nm], None) define_core_cls(cls_nm, self.classtable[super_cls_nm], w_metacls) - w_Class = self.classtable["w_Class"] - w_Metaclass = self.classtable["w_Metaclass"] - # XXX proto_shadow = w_ProtoObjectClass.shadow - proto_shadow.store_w_superclass(w_Class) - # at this point, all classes that still lack a w_class are themselves - # metaclasses + proto_shadow.store_w_superclass(self.w_Class) + # at this point, all classes that still lack a w_class are themselves metaclasses for nm, w_cls_obj in self.classtable.items(): if w_cls_obj.w_class is None: - w_cls_obj.w_class = w_Metaclass - - def define_cls(cls_nm, supercls_nm, instvarsize=0, format=shadow.POINTERS, - varsized=False): - assert cls_nm.startswith("w_") + w_cls_obj.w_class = self.w_Metaclass + + def patch_bootstrap_classes(self): + # Create all classes in the class hierarchies of the classes in the special objects array. 
+ def create_metaclass(cls_nm, supercls_nm): meta_nm = cls_nm + "Class" meta_super_nm = supercls_nm + "Class" - w_Metaclass = self.classtable["w_Metaclass"] - w_meta_cls = self.classtable[meta_nm] = \ - self.bootstrap_class(0, # XXX + w_meta_cls = self.bootstrap_class(0, # XXX self.classtable[meta_super_nm], - w_Metaclass, + self.w_Metaclass, name=meta_nm[2:]) - w_cls = self.classtable[cls_nm] = \ - self.bootstrap_class(instvarsize, + self.add_bootstrap_class(meta_nm, w_meta_cls) + return w_meta_cls + def define_cls(cls_nm, supercls_nm, instvarsize=0, format=shadow.POINTERS, varsized=False): + assert cls_nm.startswith("w_") + w_meta_cls = create_metaclass(cls_nm, supercls_nm) + w_cls = self.bootstrap_class(instvarsize, self.classtable[supercls_nm], w_meta_cls, format=format, varsized=varsized, name=cls_nm[2:]) - + self.add_bootstrap_class(cls_nm, w_cls) + return w_cls define_cls("w_Magnitude", "w_Object") - define_cls("w_Character", "w_Magnitude", instvarsize=1) define_cls("w_Number", "w_Magnitude") define_cls("w_Integer", "w_Number") - define_cls("w_SmallInteger", "w_Integer") - define_cls("w_LargePositiveInteger", "w_Integer", format=shadow.BYTES) - define_cls("w_Float", "w_Number", format=shadow.BYTES) - define_cls("w_Message", "w_Object") define_cls("w_Collection", "w_Object") define_cls("w_SequenceableCollection", "w_Collection") define_cls("w_ArrayedCollection", "w_SequenceableCollection") - define_cls("w_Array", "w_ArrayedCollection", varsized=True) - define_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES) - define_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) + define_cls("w_MethodDict", "w_Object", instvarsize=2, varsized=True) + define_cls("w_ContextPart", "w_Object") + define_cls("w_Link", "w_Object") + define_cls("w_LinkedList", "w_SequenceableCollection") + + # Also create classes for the objects in the special objects array define_cls("w_UndefinedObject", "w_Object") define_cls("w_Boolean", "w_Object") define_cls("w_True", "w_Boolean") define_cls("w_False", "w_Boolean") - define_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES) - define_cls("w_MethodDict", "w_Object", instvarsize=2, varsized=True) - define_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD) - define_cls("w_ContextPart", "w_Object") - define_cls("w_MethodContext", "w_ContextPart") - define_cls("w_Link", "w_Object") - define_cls("w_Process", "w_Link") - define_cls("w_Point", "w_Object") - define_cls("w_LinkedList", "w_SequenceableCollection") - define_cls("w_Semaphore", "w_LinkedList") - define_cls("w_BlockContext", "w_ContextPart", - instvarsize=constants.BLKCTX_STACK_START) - define_cls("w_BlockClosure", "w_Object", - instvarsize=constants.BLKCLSR_SIZE, - varsized=True) - # make better accessors for classes that can be found in special object - # table - for name in constants.classes_in_special_object_table.keys(): - name = 'w_' + name - setattr(self, name, self.classtable.get(name)) + + # Now patch up the already created special classes + def patch_special_cls(cls_nm, supercls_nm, instvarsize=0, format=shadow.POINTERS, varsized=False): + assert cls_nm.startswith("w_") + w_meta_cls = create_metaclass(cls_nm, supercls_nm) + + # Now patch up the existing class object + w_cls = self.classtable[cls_nm] + assert w_cls, "This class should have been created in ObjSpace!" 
+ self.patch_class(w_cls, + instvarsize, + self.classtable[supercls_nm], + w_meta_cls, + format=format, + varsized=varsized, + name=cls_nm[2:]) + patch_special_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) + patch_special_cls("w_SmallInteger", "w_Integer") + patch_special_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES) + patch_special_cls("w_Array", "w_ArrayedCollection", varsized=True) + patch_special_cls("w_Float", "w_Number", format=shadow.BYTES) + patch_special_cls("w_MethodContext", "w_ContextPart") + patch_special_cls("w_BlockContext", "w_ContextPart", instvarsize=constants.BLKCTX_STACK_START) + patch_special_cls("w_BlockClosure", "w_Object", instvarsize=constants.BLKCLSR_SIZE, varsized=True) + patch_special_cls("w_Point", "w_Object") + patch_special_cls("w_LargePositiveInteger", "w_Integer", format=shadow.BYTES) + patch_special_cls("w_Message", "w_Object") + patch_special_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES) + patch_special_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD) + patch_special_cls("w_Semaphore", "w_LinkedList") + patch_special_cls("w_Character", "w_Magnitude", instvarsize=1) + patch_special_cls("w_Process", "w_Link") + + def patch_bootstrap_objects(self): + def patch_bootstrap_object(obj, cls, size): + obj.w_class = cls + obj.initialize_storage(self, size) + patch_bootstrap_object(self.w_nil, self.w_UndefinedObject, 0) + patch_bootstrap_object(self.w_true, self.w_True, 0) + patch_bootstrap_object(self.w_false, self.w_False, 0) + patch_bootstrap_object(self.w_special_selectors, self.w_Array, len(constants.SPECIAL_SELECTORS) * 2) - def make_bootstrap_objects(self): - def bld_char(i): - w_cinst = self.w_Character.as_class_get_shadow(self).new() - w_cinst.store(self, constants.CHARACTER_VALUE_INDEX, - model.W_SmallInteger(i)) - return w_cinst - w_charactertable = model.W_PointersObject(self, self.classtable['w_Array'], 256) - self.add_bootstrap_object("w_charactertable", w_charactertable) - for i in range(256): - self.w_charactertable.atput0(self, i, bld_char(i)) - - # w_nil is already added to objtable in constructor. 
- self.w_nil.w_class = self.classtable['w_UndefinedObject'] - self.w_nil.initialize_storage(self, 0) - - w_true = self.classtable['w_True'].as_class_get_shadow(self).new() - self.add_bootstrap_object("w_true", w_true) - w_false = self.classtable['w_False'].as_class_get_shadow(self).new() - self.add_bootstrap_object("w_false", w_false) - - self.add_bootstrap_object("w_minus_one", model.W_SmallInteger(-1)) - self.add_bootstrap_object("w_zero", model.W_SmallInteger(0)) - self.add_bootstrap_object("w_one", model.W_SmallInteger(1)) - self.add_bootstrap_object("w_two", model.W_SmallInteger(2)) - w_special_selectors = model.W_PointersObject(self, - self.classtable['w_Array'], len(constants.SPECIAL_SELECTORS) * 2) - self.add_bootstrap_object("w_special_selectors", w_special_selectors) - - for name in constants.objects_in_special_object_table: - name = "w_" + name - if not name in self.objtable: - self.add_bootstrap_object(name, None) - - def bootstrap_class(self, instsize, w_superclass=None, w_metaclass=None, + def patch_class(self, w_class, instsize, w_superclass=None, w_metaclass=None, name='?', format=shadow.POINTERS, varsized=False): - w_class = model.W_PointersObject(self, w_metaclass, 0) s = instantiate(shadow.ClassShadow) s.space = self s.version = version.Version() @@ -223,8 +205,29 @@ s._s_methoddict = None s.instance_varsized = varsized or format != shadow.POINTERS w_class.store_shadow(s) + w_class.w_class = w_metaclass + + def bootstrap_class(self, instsize, w_superclass=None, w_metaclass=None, + name='?', format=shadow.POINTERS, varsized=False): + w_class = model.W_PointersObject(self, w_metaclass, 0) + self.patch_class(w_class, instsize, w_superclass, w_metaclass, name, format, varsized) return w_class + def w(self, any): + if any is None: return self.w_nil + if isinstance(any, model.W_Object): return any + if isinstance(any, str): + # assume never have strings of length 1 + if len(any) == 1: + return self.wrap_char(any) + else: + return self.wrap_string(any) + if isinstance(any, bool): return self.wrap_bool(any) + if isinstance(any, int): return self.wrap_int(any) + if isinstance(any, float): return self.wrap_float(any) + if isinstance(any, list): return self.wrap_list(any) + raise Exception("Cannot wrap %r" % any) + def initialize_class(self, w_class, interp): initialize_symbol = find_symbol_in_methoddict_of("initialize", w_class.class_shadow(self)) From noreply at buildbot.pypy.org Wed Mar 26 15:55:21 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 26 Mar 2014 15:55:21 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: test_wrapper also needs a bootstrapped space. Message-ID: <20140326145521.C88C21C066C@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r703:9f2f59e9acaf Date: 2014-03-26 15:55 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9f2f59e9acaf/ Log: test_wrapper also needs a bootstrapped space. Removed breakpoint. Fixed RPython compile error. 
diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -45,7 +45,7 @@ for name, idx in constants.objects_in_special_object_table.items(): name = "w_" + name if not name in self.objtable or not self.objtable[name]: - self.add_bootstrap_object(name, specials[idx]) + self.objtable[name] = specials[idx] def executable_path(self): return self._executable_path[0] diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -404,7 +404,6 @@ # raise ClassShadowError("bogus selector in method dict") w_compiledmethod = w_values.fetch(self.space, i) if not isinstance(w_compiledmethod, model.W_CompiledMethod): - import pdb; pdb.set_trace() raise ClassShadowError("The methoddict must contain " "CompiledMethods only, for now. " "If the value observed is nil, our " diff --git a/spyvm/test/test_wrapper.py b/spyvm/test/test_wrapper.py --- a/spyvm/test/test_wrapper.py +++ b/spyvm/test/test_wrapper.py @@ -5,7 +5,7 @@ from spyvm.test.test_interpreter import _new_frame def setup_module(): - space = create_space() + space = create_space(bootstrap = True) copy_to_module(locals(), __name__) def teardown_module(): From noreply at buildbot.pypy.org Wed Mar 26 18:21:14 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Mar 2014 18:21:14 +0100 (CET) Subject: [pypy-commit] stmgc default: initial HTM version Message-ID: <20140326172114.256CD1C066C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1097:e43e756ae446 Date: 2014-03-26 18:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/e43e756ae446/ Log: initial HTM version diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -25,5 +25,13 @@ clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug_nostm *.c ../gil-c7/stmgc.c -Wall -DUSE_GIL -ferror-limit=1 +duhton_htm: *.c *.h ../htm-c7/stmgc.? ../htm-c7/htm.h + clang -pthread -g -DNDEBUG -O2 -o duhton_htm *.c ../htm-c7/stmgc.c -Wall -DUSE_HTM + + +duhton_debug_htm: *.c *.h ../htm-c7/stmgc.? 
../htm-c7/htm.h + clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug_htm *.c ../htm-c7/stmgc.c -Wall -DUSE_HTM -ferror-limit=1 + + clean: rm -f duhton duhton_debug duhton_release diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -6,11 +6,16 @@ #include #include #include + #ifdef USE_GIL # include "../gil-c7/stmgc.h" #else +#ifdef USE_HTM +# include "../htm-c7/stmgc.h" +#else # include "../c7/stmgc.h" #endif +#endif extern __thread stm_thread_local_t stm_thread_local; diff --git a/gil-c7/stmgc.h b/gil-c7/stmgc.h --- a/gil-c7/stmgc.h +++ b/gil-c7/stmgc.h @@ -11,6 +11,8 @@ #define TLPREFIX /* nothing */ +#define STM_NB_SEGMENTS 4 + typedef struct { /* empty */ } stm_jmpbuf_t; diff --git a/htm-c7/htm.h b/htm-c7/htm.h new file mode 100644 --- /dev/null +++ b/htm-c7/htm.h @@ -0,0 +1,73 @@ +#ifndef _HTM_H +#define _HTM_H + +#include +#include +#include + + + +#define XBEGIN_OK (~0) +#define XBEGIN_UNKNOWN (0) +#define XBEGIN_XABORT (1 << 0) +#define XBEGIN_MAYBE_RETRY (1 << 1) +#define XBEGIN_NORMAL_CONFLICT (1 << 2) +#define XBEGIN_BUFFER_OVERFLOW (1 << 3) +#define XBEGIN_DEBUG (1 << 4) +#define XBEGIN_NESTED_ABORT (1 << 5) +#define XBEGIN_XABORT_ARG(x) (((x) >> 24) & 0xFF) + +static __thread char buf[128]; +char* xbegin_status(int status) +{ + if (status == XBEGIN_OK) + snprintf(buf, 128, "OK"); + else if (status == XBEGIN_UNKNOWN) + snprintf(buf, 128, "UNKNOWN"); + else if (status & XBEGIN_XABORT) + snprintf(buf, 128, "XABORT(%d)", XBEGIN_XABORT_ARG(status)); + else if (status & XBEGIN_MAYBE_RETRY) + snprintf(buf, 128, "MAYBE_RETRY"); + else if (status & XBEGIN_NORMAL_CONFLICT) + snprintf(buf, 128, "NORMAL_CONFLICT"); + else if (status & XBEGIN_BUFFER_OVERFLOW) + snprintf(buf, 128, "BUFFER_OVERFLOW"); + else if (status & XBEGIN_DEBUG) + snprintf(buf, 128, "DEBUG"); + else if (status & XBEGIN_NESTED_ABORT) + snprintf(buf, 128, "NESTED_ABORT"); + return buf; +} + +static __attribute__((__always_inline__)) inline int xbegin() +{ + int result = XBEGIN_OK; + asm volatile(".byte 0xC7, 0xF8; .long 0" : "+a" (result) :: "memory"); + return result; +} + +static __attribute__((__always_inline__)) inline void xend() +{ + asm volatile(".byte 0x0F, 0x01, 0xD5" ::: "memory"); +} + +#define xabort(argument) do { asm volatile(".byte 0xC6, 0xF8, %P0" :: "i" (argument) : "memory"); } while (0); + + + +static __attribute__((__always_inline__)) inline int xtest() +{ + unsigned char result; + asm volatile(".byte 0x0F, 0x01, 0xD6; setnz %0" : "=r" (result) :: "memory"); + return result; +} + + +static __attribute__((__always_inline__)) inline int mutex_locked(pthread_mutex_t* mut) +{ + return !!mut->__data.__lock; +} + + + +#endif diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c new file mode 100644 --- /dev/null +++ b/htm-c7/stmgc.c @@ -0,0 +1,402 @@ +#include "stmgc.h" +#include +#include +#include "htm.h" + +pthread_mutex_t _stm_gil = PTHREAD_MUTEX_INITIALIZER; +stm_thread_local_t *_stm_tloc; +struct stm_segment_info_s _stm_segment; + + + + +static void acquire_gil(stm_thread_local_t *tl) { + if (pthread_mutex_lock(&_stm_gil) == 0) { + _stm_tloc = tl; + return; + } + abort(); +} + +void stm_start_inevitable_transaction(stm_thread_local_t *tl) { + if (mutex_locked(&_stm_gil)) { + acquire_gil(tl); + return; + } + + int status; + transaction_retry: + if ((status = xbegin()) == XBEGIN_OK) { + if (mutex_locked(&_stm_gil)) + xabort(0); + /* transaction OK */ + } + else { + if (mutex_locked(&_stm_gil)) + acquire_gil(tl); + else if (status & (XBEGIN_MAYBE_RETRY | 
XBEGIN_NORMAL_CONFLICT)) + goto transaction_retry; + else + acquire_gil(tl); + } + + _stm_tloc = tl; +} + +void stm_commit_transaction(void) { + stm_collect(0); + _stm_tloc = NULL; + if (mutex_locked(&_stm_gil)) { + assert(!xtest()); + if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); + } else { + xend(); + } +} + + + + + + +/************************************************************/ + +struct list_s { + uintptr_t count; + uintptr_t last_allocated; + uintptr_t items[]; +}; + +static struct list_s *list_create(void); + +static inline void list_free(struct list_s *lst) +{ + free(lst); +} + +#define LIST_FREE(lst) (list_free(lst), (lst) = NULL) + + +static struct list_s *_list_grow(struct list_s *, uintptr_t); + +static inline struct list_s *list_append(struct list_s *lst, uintptr_t item) +{ + uintptr_t index = lst->count++; + if (UNLIKELY(index > lst->last_allocated)) + lst = _list_grow(lst, index); + lst->items[index] = item; + return lst; +} + +#define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) + + +__attribute__((unused)) +static inline void list_clear(struct list_s *lst) +{ + lst->count = 0; +} + +static inline bool list_is_empty(struct list_s *lst) +{ + return (lst->count == 0); +} + +__attribute__((unused)) +static inline uintptr_t list_count(struct list_s *lst) +{ + return lst->count; +} + +static inline uintptr_t list_pop_item(struct list_s *lst) +{ + assert(lst->count > 0); + return lst->items[--lst->count]; +} + +__attribute__((unused)) +static inline uintptr_t list_item(struct list_s *lst, uintptr_t index) +{ + return lst->items[index]; +} + +__attribute__((unused)) +static inline void list_set_item(struct list_s *lst, uintptr_t index, + uintptr_t newitem) +{ + lst->items[index] = newitem; +} + +#define LIST_FOREACH_R(lst, TYPE, CODE) \ + do { \ + struct list_s *_lst = (lst); \ + uintptr_t _i; \ + for (_i = _lst->count; _i--; ) { \ + TYPE item = (TYPE)_lst->items[_i]; \ + CODE; \ + } \ + } while (0) + +#define LIST_SETSIZE(n) (sizeof(struct list_s) + LIST_ITEMSSIZE(n)) +#define LIST_ITEMSSIZE(n) ((n) * sizeof(uintptr_t)) +#define LIST_OVERCNT(n) (33 + ((((n) / 2) * 3) | 1)) + +static struct list_s *list_create(void) +{ + uintptr_t initial_allocation = 32; + struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); + if (lst == NULL) + abort(); + + lst->count = 0; + lst->last_allocated = initial_allocation - 1; + return lst; +} + +static struct list_s *_list_grow(struct list_s *lst, uintptr_t nalloc) +{ + nalloc = LIST_OVERCNT(nalloc); + lst = realloc(lst, LIST_SETSIZE(nalloc)); + if (lst == NULL) + abort(); + + lst->last_allocated = nalloc - 1; + return lst; +} + +/************************************************************/ + +#define GCFLAG_WRITE_BARRIER _STM_GCFLAG_WRITE_BARRIER + +static struct list_s *objects_pointing_to_nursery; +static struct list_s *young_weakrefs; + +void stm_setup(void) +{ + objects_pointing_to_nursery = list_create(); + young_weakrefs = list_create(); +} + +void stm_teardown(void) +{ + list_free(objects_pointing_to_nursery); +} + +void _stm_write_slowpath(object_t *obj) +{ + obj->gil_flags &= ~GCFLAG_WRITE_BARRIER; + LIST_APPEND(objects_pointing_to_nursery, obj); +} + +object_t *_stm_allocate_old(ssize_t size) +{ + char *p = malloc(size); + assert(p); + memset(p, 0, size); + ((object_t *)p)->gil_flags = _STM_GCFLAG_WRITE_BARRIER; + return (object_t *)p; +} + +object_t *_stm_allocate_external(ssize_t size) +{ + char *p = malloc(size); + assert(p); + memset(p, 0, size); + _stm_write_slowpath((object_t *)p); + return 
(object_t *)p; +} + +/************************************************************/ + + +#define NB_NURSERY_PAGES 1024 // 4MB +#define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) + +char *_stm_nursery_base = NULL; +char *_stm_nursery_current = NULL; +char *_stm_nursery_end = NULL; +#define _stm_nursery_start ((uintptr_t)_stm_nursery_base) + +static bool _is_in_nursery(object_t *obj) +{ + return ((char *)obj >= _stm_nursery_base && + (char *)obj < _stm_nursery_end); +} + +long stm_can_move(object_t *obj) +{ + return _is_in_nursery(obj); +} + +#define GCWORD_MOVED ((object_t *) -42) + +static void minor_trace_if_young(object_t **pobj) +{ + object_t *obj = *pobj; + object_t *nobj; + + if (obj == NULL) + return; + + if (_is_in_nursery(obj)) { + /* If the object was already seen here, its first word was set + to GCWORD_MOVED. In that case, the forwarding location, i.e. + where the object moved to, is stored in the second word in 'obj'. */ + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + + if (pforwarded_array[0] == GCWORD_MOVED) { + *pobj = pforwarded_array[1]; /* already moved */ + return; + } + + /* We need to make a copy of this object. + */ + size_t size = stmcb_size_rounded_up(obj); + + nobj = malloc(size); + assert(nobj); + + /* Copy the object */ + memcpy(nobj, obj, size); + + /* Done copying the object. */ + //dprintf(("\t\t\t\t\t%p -> %p\n", obj, nobj)); + pforwarded_array[0] = GCWORD_MOVED; + pforwarded_array[1] = nobj; + *pobj = nobj; + } + + else { + /* The object was not in the nursery at all */ + return; + } + + /* Must trace the object later */ + LIST_APPEND(objects_pointing_to_nursery, nobj); +} + +static void collect_roots_in_nursery(void) +{ + object_t **current = _stm_tloc->shadowstack; + object_t **base = _stm_tloc->shadowstack_base; + while (current-- != base) { + minor_trace_if_young(current); + } + minor_trace_if_young(&_stm_tloc->thread_local_obj); +} + +static inline void _collect_now(object_t *obj) +{ + assert(!_is_in_nursery(obj)); + + /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. */ + assert(!(obj->gil_flags & GCFLAG_WRITE_BARRIER)); + obj->gil_flags |= GCFLAG_WRITE_BARRIER; + + /* Trace the 'obj' to replace pointers to nursery with pointers + outside the nursery, possibly forcing nursery objects out and + adding them to 'objects_pointing_to_nursery' as well. 
*/ + stmcb_trace(obj, &minor_trace_if_young); +} + +static void collect_oldrefs_to_nursery(void) +{ + struct list_s *lst = objects_pointing_to_nursery; + + while (!list_is_empty(lst)) { + object_t *obj = (object_t *)list_pop_item(lst); + + _collect_now(obj); + + /* the list could have moved while appending */ + lst = objects_pointing_to_nursery; + } +} + +static void throw_away_nursery(void) +{ + if (_stm_nursery_base == NULL) { + _stm_nursery_base = malloc(NURSERY_SIZE); + assert(_stm_nursery_base); + _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; + } + + _stm_nursery_current = _stm_nursery_base; + memset(_stm_nursery_base, 0, NURSERY_SIZE); +} + +#define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((char *)(wr)) + (sz) - sizeof(void*))) + +static void move_young_weakrefs(void) +{ + LIST_FOREACH_R( + young_weakrefs, + object_t * /*item*/, + ({ + assert(_is_in_nursery(item)); + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; + + /* the following checks are done like in nursery.c: */ + if (pforwarded_array[0] != GCWORD_MOVED) { + /* weakref dies */ + continue; + } + + item = pforwarded_array[1]; /* moved location */ + + assert(!_is_in_nursery(item)); + + ssize_t size = 16; + object_t *pointing_to = *WEAKREF_PTR(item, size); + assert(pointing_to != NULL); + + if (_is_in_nursery(pointing_to)) { + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; + /* the following checks are done like in nursery.c: */ + if (pforwarded_array[0] != GCWORD_MOVED) { + /* pointing_to dies */ + *WEAKREF_PTR(item, size) = NULL; + continue; /* no need to remember in old_weakrefs */ + } + else { + /* moved location */ + *WEAKREF_PTR(item, size) = pforwarded_array[1]; + } + } + else { + /* pointing_to was already old */ + } + //LIST_APPEND(STM_PSEGMENT->old_weakrefs, item); + })); + list_clear(young_weakrefs); +} + +void stm_collect(long level) +{ + /* 'level' is ignored, only minor collections are implemented */ + collect_roots_in_nursery(); + collect_oldrefs_to_nursery(); + move_young_weakrefs(); + throw_away_nursery(); +} + +object_t *_stm_allocate_slowpath(ssize_t size_rounded_up) +{ + /* run minor collection */ + //fprintf(stderr, "minor collect\n"); + stm_collect(0); + + char *p = _stm_nursery_current; + char *end = p + size_rounded_up; + assert(end <= _stm_nursery_end); + _stm_nursery_current = end; + return (object_t *)p; +} + +object_t *stm_allocate_weakref(ssize_t size_rounded_up) +{ + assert(size_rounded_up == 16); + object_t *obj = stm_allocate(size_rounded_up); + LIST_APPEND(young_weakrefs, obj); + return obj; +} diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h new file mode 100644 --- /dev/null +++ b/htm-c7/stmgc.h @@ -0,0 +1,152 @@ +#ifndef _STMGC_H +#define _STMGC_H + +#include +#include +#include +#include +#include +#include +#include + +#define TLPREFIX /* nothing */ + +#define STM_NB_SEGMENTS 4 + +typedef struct { /* empty */ } stm_jmpbuf_t; + +typedef struct object_s { + uint32_t gil_flags; +} object_t; + +typedef struct stm_thread_local_s { + object_t **shadowstack; + object_t **shadowstack_base; + object_t *thread_local_obj; + long last_abort__bytes_in_nursery; +} stm_thread_local_t; + +extern stm_thread_local_t *_stm_tloc; +extern char *_stm_nursery_current, *_stm_nursery_end; + +struct stm_segment_info_s { + stm_jmpbuf_t *jmpbuf_ptr; /* compat only -- always NULL */ + char *nursery_current; /* compat only -- always NULL */ +}; +extern struct stm_segment_info_s _stm_segment; +#define STM_SEGMENT (&_stm_segment) + +#ifdef NDEBUG +#define 
OPT_ASSERT(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) +#else +#define OPT_ASSERT(cond) assert(cond) +#endif +#define UNLIKELY(x) __builtin_expect(x, false) + +#define _STM_GCFLAG_WRITE_BARRIER 0x01 +#define _STM_FAST_ALLOC (66*1024) + + +object_t *_stm_allocate_old(ssize_t size); + +object_t *_stm_allocate_external(ssize_t); +object_t *_stm_allocate_slowpath(ssize_t); +object_t *stm_allocate_weakref(ssize_t size_rounded_up); + +__attribute__((always_inline)) +inline static object_t *stm_allocate(ssize_t size_rounded_up) { + OPT_ASSERT(size_rounded_up >= 16); + OPT_ASSERT((size_rounded_up & 7) == 0); + + if (UNLIKELY(size_rounded_up >= _STM_FAST_ALLOC)) + return _stm_allocate_external(size_rounded_up); + + char *p = _stm_nursery_current; + char *end = p + size_rounded_up; + _stm_nursery_current = end; + if (UNLIKELY(end > _stm_nursery_end)) + return _stm_allocate_slowpath(size_rounded_up); + + return (object_t *)p; +} + +inline static void stm_register_thread_local(stm_thread_local_t *tl) { + tl->thread_local_obj = NULL; + tl->shadowstack_base = (object_t **)malloc(768*1024); + assert(tl->shadowstack_base); + tl->shadowstack = tl->shadowstack_base; + tl->last_abort__bytes_in_nursery = 0; +} +inline static void stm_unregister_thread_local(stm_thread_local_t *tl) { + free(tl->shadowstack_base); +} + +extern pthread_mutex_t _stm_gil; + +void stm_setup(void); +void stm_teardown(void); +void stm_collect(long level); + + +void stm_start_inevitable_transaction(stm_thread_local_t *tl); +void stm_commit_transaction(void); + +inline static void stm_become_inevitable( + stm_thread_local_t *tl, const char *msg) { } +inline static void _stm_become_inevitable(const char *msg) { } +inline static void stm_become_globally_unique_transaction( + stm_thread_local_t *tl, const char *msg) { } + +static inline int stm_is_inevitable(void) { return 1; } +inline static void stm_read(object_t *ob) { } + +void _stm_write_slowpath(object_t *); + +__attribute__((always_inline)) +inline static void stm_write(object_t *ob) { + if (UNLIKELY(ob->gil_flags & _STM_GCFLAG_WRITE_BARRIER)) + _stm_write_slowpath(ob); +} + +inline static char *_stm_real_address(object_t *ob) { return (char *)ob; } +static inline void stm_safe_point(void) { } + +#define STM_START_TRANSACTION(tl, here) do { \ + (void)&(here); \ + stm_start_inevitable_transaction(tl); \ +} while (0) + +#define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) +#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) +#define STM_POP_ROOT_RET(tl) (*(--(tl).shadowstack)) + + +extern ssize_t stmcb_size_rounded_up(struct object_s *); +extern void stmcb_trace(struct object_s *, void (object_t **)); + +inline static object_t *stm_setup_prebuilt(object_t *preb) { + if (preb != NULL) + preb->gil_flags |= _STM_GCFLAG_WRITE_BARRIER; + return preb; +} +inline static object_t *stm_setup_prebuilt_weakref(object_t *preb) { + return stm_setup_prebuilt(preb); +} + +inline static long stm_identityhash(object_t *obj) { + return (long)obj; // XXX fails after a minor collection +} +inline static long stm_id(object_t *obj) { + return (long)obj; +} +inline static void stm_set_prebuilt_identityhash(object_t *obj, long hash) { + // XXX ignored +} +long stm_can_move(object_t *); + +inline static void stm_call_on_abort(stm_thread_local_t *tl, void *key, + void callback(void *)) { + // XXX ignored +} + +#endif From noreply at buildbot.pypy.org Wed Mar 26 18:44:07 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Mar 2014 18:44:07 
+0100 (CET) Subject: [pypy-commit] stmgc default: fixes Message-ID: <20140326174407.983F81D298A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1098:80e72e01e2b8 Date: 2014-03-26 18:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/80e72e01e2b8/ Log: fixes diff --git a/htm-c7/htm.h b/htm-c7/htm.h --- a/htm-c7/htm.h +++ b/htm-c7/htm.h @@ -18,7 +18,7 @@ #define XBEGIN_XABORT_ARG(x) (((x) >> 24) & 0xFF) static __thread char buf[128]; -char* xbegin_status(int status) +static char* xbegin_status(int status) { if (status == XBEGIN_OK) snprintf(buf, 128, "OK"); @@ -36,6 +36,9 @@ snprintf(buf, 128, "DEBUG"); else if (status & XBEGIN_NESTED_ABORT) snprintf(buf, 128, "NESTED_ABORT"); + else + snprintf(buf, 128, "WAT."); + return buf; } @@ -53,8 +56,6 @@ #define xabort(argument) do { asm volatile(".byte 0xC6, 0xF8, %P0" :: "i" (argument) : "memory"); } while (0); - - static __attribute__((__always_inline__)) inline int xtest() { unsigned char result; diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -24,20 +24,26 @@ return; } - int status; + volatile int status; transaction_retry: - if ((status = xbegin()) == XBEGIN_OK) { + + status = xbegin(); + if (status == XBEGIN_OK) { if (mutex_locked(&_stm_gil)) xabort(0); /* transaction OK */ } else { - if (mutex_locked(&_stm_gil)) + if (status & (XBEGIN_MAYBE_RETRY | XBEGIN_NORMAL_CONFLICT | XBEGIN_XABORT)) { + goto transaction_retry; + } else if (mutex_locked(&_stm_gil)) { acquire_gil(tl); - else if (status & (XBEGIN_MAYBE_RETRY | XBEGIN_NORMAL_CONFLICT)) - goto transaction_retry; - else + } + else { acquire_gil(tl); + } + + fprintf(stderr, "failed HTM: %s\n", xbegin_status(status)); } _stm_tloc = tl; @@ -49,8 +55,10 @@ if (mutex_locked(&_stm_gil)) { assert(!xtest()); if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); + //fprintf(stderr, "G"); } else { xend(); + fprintf(stderr, "==== Committed HTM ====\n"); } } From noreply at buildbot.pypy.org Wed Mar 26 18:57:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 26 Mar 2014 18:57:29 +0100 (CET) Subject: [pypy-commit] pypy default: fix np.empty_like(dtype=None) not copying dtype (issue1715) Message-ID: <20140326175729.8831E1D298A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70297:bf6bb7bf1035 Date: 2014-03-26 13:52 -0400 http://bitbucket.org/pypy/pypy/changeset/bf6bb7bf1035/ Log: fix np.empty_like(dtype=None) not copying dtype (issue1715) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -100,7 +100,7 @@ @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): w_a = convert_to_array(space, w_a) - if w_dtype is None: + if space.is_none(w_dtype): dtype = w_a.get_dtype() else: dtype = space.interp_w(descriptor.W_Dtype, diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -404,6 +404,8 @@ assert b.shape == a.shape assert b.dtype == a.dtype assert b[0,0] != 1 + b = np.empty_like(np.array(True), dtype=None) + assert b.dtype is np.dtype(bool) b = np.empty_like(a, dtype='i4') assert b.shape == a.shape assert b.dtype == np.dtype('i4') From noreply at buildbot.pypy.org Wed Mar 26 19:08:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 26 Mar 2014 19:08:16 +0100 (CET) Subject: [pypy-commit] pypy default: support 
order=K in array ctors (issue1711) Message-ID: <20140326180816.2728E1D240D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70298:db6f95b7c608 Date: 2014-03-26 14:05 -0400 http://bitbucket.org/pypy/pypy/changeset/db6f95b7c608/ Log: support order=K in array ctors (issue1711) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -48,6 +48,8 @@ order = 'C' else: order = space.str_w(w_order) + if order == 'K': + order = 'C' if order != 'C': # or order != 'F': raise oefmt(space.w_ValueError, "Unknown order: %s", order) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -334,6 +334,15 @@ b = array(a, dtype=float) assert b == 123.0 + a = array([[123, 456]]) + assert a.flags['C'] + b = array(a, order='K') + assert b.flags['C'] + assert (b == a).all() + b = array(a, order='K', copy=True) + assert b.flags['C'] + assert (b == a).all() + def test_dtype_attribute(self): import numpy as np a = np.array(40000, dtype='uint16') From noreply at buildbot.pypy.org Wed Mar 26 22:21:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 26 Mar 2014 22:21:45 +0100 (CET) Subject: [pypy-commit] pypy default: some tests are not strict ansi C, on win32 compile as C++ Message-ID: <20140326212145.1547C1C066C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70299:ccdd17cd5300 Date: 2014-03-26 22:47 +0200 http://bitbucket.org/pypy/pypy/changeset/ccdd17cd5300/ Log: some tests are not strict ansi C, on win32 compile as C++ diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,8 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # tests are not strictly ansi C compliant, compile as C++ + kwds["compile_extra"].append("/TP") # prevent linking with PythonXX.lib w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % From noreply at buildbot.pypy.org Wed Mar 26 22:21:46 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 26 Mar 2014 22:21:46 +0100 (CET) Subject: [pypy-commit] pypy default: correct types via casting Message-ID: <20140326212146.3F9291C066C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70300:bef3dc99bdd7 Date: 2014-03-26 23:01 +0200 http://bitbucket.org/pypy/pypy/changeset/bef3dc99bdd7/ Log: correct types via casting diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -107,10 +107,10 @@ } EnumObject; static void - enum_dealloc(EnumObject *op) + enum_dealloc(PyObject *op) { - Py_DECREF(op->ob_name); - Py_TYPE(op)->tp_free((PyObject *)op); + Py_DECREF(((EnumObject *)op)->ob_name); + Py_TYPE(op)->tp_free(op); } static PyMemberDef enum_members[] = { diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -158,14 +158,14 @@ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type 
!= PyExc_TypeError) + if (val->ob_type != (_typeobject *)PyExc_TypeError) Py_RETURN_FALSE; /* Normalize again */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type != PyExc_TypeError) + if (val->ob_type != (_typeobject *)PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); From noreply at buildbot.pypy.org Wed Mar 26 22:21:47 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 26 Mar 2014 22:21:47 +0100 (CET) Subject: [pypy-commit] pypy default: true is a keyword Message-ID: <20140326212147.765DD1C066C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70301:050293498791 Date: 2014-03-26 23:10 +0200 http://bitbucket.org/pypy/pypy/changeset/050293498791/ Log: true is a keyword diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -642,30 +642,30 @@ body = """ static PyObject* foo_pi(PyObject* self, PyObject *args) { - PyObject *true = Py_True; - int refcnt = true->ob_refcnt; + PyObject *true_obj = Py_True; + int refcnt = true_obj->ob_refcnt; int refcnt_after; - Py_INCREF(true); - Py_INCREF(true); - PyBool_Check(true); - refcnt_after = true->ob_refcnt; - Py_DECREF(true); - Py_DECREF(true); + Py_INCREF(true_obj); + Py_INCREF(true_obj); + PyBool_Check(true_obj); + refcnt_after = true_obj->ob_refcnt; + Py_DECREF(true_obj); + Py_DECREF(true_obj); fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt+2 && refcnt < 3); } static PyObject* foo_bar(PyObject* self, PyObject *args) { - PyObject *true = Py_True; + PyObject *true_obj = Py_True; PyObject *tup = NULL; - int refcnt = true->ob_refcnt; + int refcnt = true_obj->ob_refcnt; int refcnt_after; tup = PyTuple_New(1); - Py_INCREF(true); - if (PyTuple_SetItem(tup, 0, true) < 0) + Py_INCREF(true_obj); + if (PyTuple_SetItem(tup, 0, true_obj) < 0) return NULL; - refcnt_after = true->ob_refcnt; + refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); fprintf(stderr, "REFCNT2 %i %i\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt); From noreply at buildbot.pypy.org Thu Mar 27 00:34:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Mar 2014 00:34:01 +0100 (CET) Subject: [pypy-commit] pypy default: fix descr_int/long/float on numpy scalars (issue1590) Message-ID: <20140326233401.A33171C066C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70302:297f0a5cd941 Date: 2014-03-26 19:31 -0400 http://bitbucket.org/pypy/pypy/changeset/297f0a5cd941/ Log: fix descr_int/long/float on numpy scalars (issue1590) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -161,22 +161,25 @@ return space.index(self.item(space)) def descr_int(self, space): - if isinstance(self, W_UnsignedIntegerBox): - box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) else: - box = self.convert_to(space, W_Int64Box._get_dtype(space)) - return space.int(box.item(space)) + box = self + return space.call_function(space.w_int, box.item(space)) def descr_long(self, space): - if isinstance(self, W_UnsignedIntegerBox): - box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) else: - box = 
self.convert_to(space, W_Int64Box._get_dtype(space)) - return space.long(box.item(space)) + box = self + return space.call_function(space.w_long, box.item(space)) def descr_float(self, space): - box = self.convert_to(space, W_Float64Box._get_dtype(space)) - return space.float(box.item(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) + else: + box = self + return space.call_function(space.w_float, box.item(space)) def descr_oct(self, space): return space.oct(self.descr_int(space)) @@ -185,8 +188,7 @@ return space.hex(self.descr_int(space)) def descr_nonzero(self, space): - dtype = self.get_dtype(space) - return space.wrap(dtype.itemtype.bool(self)) + return space.wrap(self.get_dtype(space).itemtype.bool(self)) def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -36,6 +36,24 @@ exc = raises(ValueError, "int(np.str_('abc'))") assert exc.value.message.startswith('invalid literal for int()') assert int(np.uint64((2<<63) - 1)) == (2<<63) - 1 + exc = raises(ValueError, "int(np.float64(np.nan))") + assert str(exc.value) == "cannot convert float NaN to integer" + exc = raises(OverflowError, "int(np.float64(np.inf))") + assert str(exc.value) == "cannot convert float infinity to integer" + assert int(np.float64(1e100)) == int(1e100) + assert long(np.float64(1e100)) == int(1e100) + assert int(np.complex128(1e100+2j)) == int(1e100) + exc = raises(OverflowError, "int(np.complex64(1e100+2j))") + assert str(exc.value) == "cannot convert float infinity to integer" + assert int(np.str_('100000000000000000000')) == 100000000000000000000 + assert long(np.str_('100000000000000000000')) == 100000000000000000000 + + assert float(np.float64(1e100)) == 1e100 + assert float(np.complex128(1e100+2j)) == 1e100 + assert float(np.str_('1e100')) == 1e100 + assert float(np.str_('inf')) == np.inf + assert str(float(np.float64(np.nan))) == 'nan' + assert oct(np.int32(11)) == '013' assert oct(np.float32(11.6)) == '013' assert oct(np.complex64(11-12j)) == '013' From noreply at buildbot.pypy.org Thu Mar 27 00:42:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Mar 2014 00:42:36 +0100 (CET) Subject: [pypy-commit] pypy default: partial backout of bef3dc99bdd7 (broke linux tests) Message-ID: <20140326234236.87A321D240D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r70303:a1d21ae8c4d3 Date: 2014-03-26 19:41 -0400 http://bitbucket.org/pypy/pypy/changeset/a1d21ae8c4d3/ Log: partial backout of bef3dc99bdd7 (broke linux tests) diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -158,14 +158,14 @@ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type != (_typeobject *)PyExc_TypeError) + if (val->ob_type != PyExc_TypeError) Py_RETURN_FALSE; /* Normalize again */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type != (_typeobject *)PyExc_TypeError) + if (val->ob_type != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); From noreply at buildbot.pypy.org Thu Mar 27 10:45:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 10:45:08 +0100 (CET) Subject: 
[pypy-commit] pypy default: Contributions welcome. Message-ID: <20140327094508.A883D1C14E7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70304:1ba0c12f17da Date: 2014-03-27 10:44 +0100 http://bitbucket.org/pypy/pypy/changeset/1ba0c12f17da/ Log: Contributions welcome. diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -211,6 +211,9 @@ .. __: `recursion depth limit`_ +We also do not include any of the recent API additions to Stackless +Python, like ``set_atomic()``. Contributions welcome. + Recursion depth limit +++++++++++++++++++++ From noreply at buildbot.pypy.org Thu Mar 27 11:12:12 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 11:12:12 +0100 (CET) Subject: [pypy-commit] stmgc default: make HTM work sometimes: copy nearly all logic from paper about ruby; don't Message-ID: <20140327101212.A1CF01C31CA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1099:ae1a7d62c9a2 Date: 2014-03-27 11:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/ae1a7d62c9a2/ Log: make HTM work sometimes: copy nearly all logic from paper about ruby; don't clean whole nursery on commit, only used parts (always overflowed) diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -30,3 +30,9 @@ release-%: %.c ${H_FILES} ${C_FILES} clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-$* \ -Wall -Werror ../stmgc.c + + +release-htm-%: %.c ../../htm-c7/stmgc.? ../../htm-c7/htm.h + clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-htm-$* ../../htm-c7/stmgc.c -Wall -DUSE_HTM + + diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -4,11 +4,20 @@ #include #include -#include "stmgc.h" +#ifdef USE_HTM +# include "../../htm-c7/stmgc.h" +#else +# include "stmgc.h" +#endif -#define NTHREADS 3 -#define LIST_LENGTH 2000 -#define BUNCH 100 +#define LIST_LENGTH 4000 +#define NTHREADS 2 + +#ifdef USE_HTM +# define BUNCH 200 +#else +# define BUNCH 200 +#endif typedef TLPREFIX struct node_s node_t; typedef node_t* nodeptr_t; @@ -234,6 +243,7 @@ setup_list(); + for (i = 1; i <= NTHREADS; i++) { newthread(demo2, (void*)(uintptr_t)i); } diff --git a/gil-c7/stmgc.c b/gil-c7/stmgc.c --- a/gil-c7/stmgc.c +++ b/gil-c7/stmgc.c @@ -267,10 +267,11 @@ _stm_nursery_base = malloc(NURSERY_SIZE); assert(_stm_nursery_base); _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; + _stm_nursery_current = _stm_nursery_base; } + memset(_stm_nursery_base, 0, _stm_nursery_current-_stm_nursery_base); _stm_nursery_current = _stm_nursery_base; - memset(_stm_nursery_base, 0, NURSERY_SIZE); } #define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((char *)(wr)) + (sz) - sizeof(void*))) @@ -332,6 +333,7 @@ { /* run minor collection */ //fprintf(stderr, "minor collect\n"); + _stm_nursery_current -= size_rounded_up; stm_collect(0); char *p = _stm_nursery_current; diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -7,9 +7,14 @@ stm_thread_local_t *_stm_tloc; struct stm_segment_info_s _stm_segment; +#define TRANSIENT_RETRY_MAX 5 +#define GIL_RETRY_MAX 5 +#define ABORT_GIL_LOCKED 1 +#define smp_spinloop() asm volatile ("pause":::"memory") + static void acquire_gil(stm_thread_local_t *tl) { if (pthread_mutex_lock(&_stm_gil) == 0) { _stm_tloc = tl; @@ -18,32 +23,76 @@ abort(); } +static int spin_and_acquire_gil(stm_thread_local_t *tl) { + int n = 100; + while ((n --> 0) && mutex_locked(&_stm_gil)) { + smp_spinloop(); + 
} + + if (!mutex_locked(&_stm_gil)) + return 0; + + acquire_gil(tl); + return 1; +} + +static int is_persistent(int status) { + if ((status & XBEGIN_XABORT) && XBEGIN_XABORT_ARG(status) == ABORT_GIL_LOCKED) + return 0; + else if (status & (XBEGIN_MAYBE_RETRY | XBEGIN_NORMAL_CONFLICT)) + return 0; + else if (status == XBEGIN_UNKNOWN) + return 0; + return 1; +} + void stm_start_inevitable_transaction(stm_thread_local_t *tl) { + /* set_transaction_length(pc) */ + if (mutex_locked(&_stm_gil)) { - acquire_gil(tl); - return; + if (spin_and_acquire_gil(tl)) + return; } volatile int status; + volatile int transient_retry_counter = TRANSIENT_RETRY_MAX; + volatile int gil_retry_counter = GIL_RETRY_MAX; + volatile int first_retry = 1; + transaction_retry: - status = xbegin(); if (status == XBEGIN_OK) { if (mutex_locked(&_stm_gil)) - xabort(0); + xabort(ABORT_GIL_LOCKED); /* transaction OK */ } else { - if (status & (XBEGIN_MAYBE_RETRY | XBEGIN_NORMAL_CONFLICT | XBEGIN_XABORT)) { - goto transaction_retry; - } else if (mutex_locked(&_stm_gil)) { + if (first_retry) { + first_retry = 0; + /* adjust_transaction_length(pc) */ + } + + if (mutex_locked(&_stm_gil)) { + gil_retry_counter--; + if (gil_retry_counter > 0) { + if (spin_and_acquire_gil(tl)) + return; + else + goto transaction_retry; + } acquire_gil(tl); - } - else { + } else if (is_persistent(status)) { + acquire_gil(tl); + } else { + /* transient abort */ + transient_retry_counter--; + if (transient_retry_counter > 0) + goto transaction_retry; acquire_gil(tl); } - fprintf(stderr, "failed HTM: %s\n", xbegin_status(status)); + /* fprintf(stderr, "failed HTM: %s, t_retry: %d, gil_retry: %d\n", */ + /* xbegin_status(status), transient_retry_counter, gil_retry_counter); */ } _stm_tloc = tl; @@ -55,10 +104,10 @@ if (mutex_locked(&_stm_gil)) { assert(!xtest()); if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); - //fprintf(stderr, "G"); + fprintf(stderr, "G"); } else { xend(); - fprintf(stderr, "==== Committed HTM ====\n"); + fprintf(stderr, "H"); } } @@ -327,10 +376,11 @@ _stm_nursery_base = malloc(NURSERY_SIZE); assert(_stm_nursery_base); _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; + _stm_nursery_current = _stm_nursery_base; } + memset(_stm_nursery_base, 0, _stm_nursery_current-_stm_nursery_base); _stm_nursery_current = _stm_nursery_base; - memset(_stm_nursery_base, 0, NURSERY_SIZE); } #define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((char *)(wr)) + (sz) - sizeof(void*))) @@ -392,6 +442,7 @@ { /* run minor collection */ //fprintf(stderr, "minor collect\n"); + _stm_nursery_current -= size_rounded_up; stm_collect(0); char *p = _stm_nursery_current; From noreply at buildbot.pypy.org Thu Mar 27 11:53:00 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 27 Mar 2014 11:53:00 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Removed obsolete method primsize(). Message-ID: <20140327105300.08C4D1D24A1@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r704:4afcf60689ed Date: 2014-03-27 10:27 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/4afcf60689ed/ Log: Removed obsolete method primsize(). Removed space reference from PointersObjects (available in their shadow object). Tried to improve code for __str__ and __repr__ of model objects. 
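The htm-c7 changes in r1099 above all follow the classic transactional-lock-elision shape: start a hardware transaction, read the GIL inside it so that any lock holder forces an abort, retry transient aborts a bounded number of times, and otherwise fall back to taking the mutex outright. The following is a condensed sketch of that shape only, written against the compiler's RTM intrinsics from immintrin.h (compile with -mrtm) rather than the hand-rolled asm in htm-c7/htm.h; the names example_begin, example_commit, gil_is_locked and MAX_RETRIES are illustrative and not part of the stmgc sources.

#include <immintrin.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t gil = PTHREAD_MUTEX_INITIALIZER;
static volatile bool gil_is_locked = false;  /* written only by the fallback path */

#define MAX_RETRIES 5

/* Returns true if we are speculating in HTM, false if we hold the mutex. */
static bool example_begin(void)
{
    int retries = MAX_RETRIES;
    while (retries-- > 0) {
        unsigned int status = _xbegin();
        if (status == _XBEGIN_STARTED) {
            /* Put the lock flag in the read-set: if another thread takes
               the mutex later, this transaction aborts instead of racing it. */
            if (gil_is_locked)
                _xabort(1);
            return true;
        }
        /* Transient abort: wait until the lock looks free, then retry. */
        while (gil_is_locked)
            _mm_pause();
    }
    /* Persistent aborts or too many retries: fall back to the lock. */
    pthread_mutex_lock(&gil);
    gil_is_locked = true;
    return false;
}

static void example_commit(bool speculating)
{
    if (speculating) {
        _xend();                    /* commit the hardware transaction */
    } else {
        gil_is_locked = false;
        pthread_mutex_unlock(&gil);
    }
}

stm_start_inevitable_transaction() and stm_commit_transaction() in the r1099 diff take the same two paths, with the extra twists of counting GIL-waits and transient aborts separately and of xabort()ing with ABORT_GIL_LOCKED so that a lock-induced abort can be told apart from a real conflict.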
diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -195,7 +195,7 @@ s_class = w_object.class_shadow(IProxy.space) size = s_class.instsize() if s_class.isvariable(): - size += w_object.primsize(IProxy.space) + size += w_object.varsize(IProxy.space) if not isinstance(w_object, model.W_BytesObject): size *= 4 return size @@ -308,7 +308,7 @@ @expose_on_virtual_machine_proxy([oop], int) def stSizeOf(w_object): - return w_object.primsize(IProxy.space) + return w_object.varsize(IProxy.space) @expose_on_virtual_machine_proxy([int, oop, int], oop) def storeIntegerofObjectwithValue(n0, w_object, a): diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -31,7 +31,8 @@ """Root of Squeak model, abstract.""" _attrs_ = [] # no RPython-level instance variables allowed in W_Object _settled_ = True - + repr_classname = "W_Object" + def size(self): """Return bytesize that conforms to Blue Book. @@ -49,10 +50,6 @@ """Return bytesize of variable-sized part. Variable sized objects are those created with #new:.""" - return self.size(space) - - def primsize(self, space): - # TODO remove this method return self.size() def getclass(self, space): @@ -104,6 +101,9 @@ def invariant(self): return True + def getclass(self, space): + raise NotImplementedError() + def class_shadow(self, space): """Return internal representation of Squeak class.""" return self.getclass(space).as_class_get_shadow(space) @@ -129,12 +129,15 @@ true for some W_PointersObjects""" return True - def __repr__(self): - return self.as_repr_string() - - def as_repr_string(self): - return "%r" % self - + def classname(self, space): + """Get the name of the class of the receiver""" + name = None + if self.has_class(): + name = self.class_shadow(space).name + if not name: + name = "?" + return name + def lshift(self, space, shift): raise error.PrimitiveFailedError() @@ -146,6 +149,33 @@ def is_array_object(self): return False + + # Methods for printing this object + + def guess_classname(self): + """Get the name of the class of the receiver without using a space. + If the shadow of the class of the receiver is not yet initialized, + this might not return a correct name.""" + return "?" + + def __repr__(self): + return self.as_repr_string() + + def __str__(self): + content = self.str_content() + if content: + return "a %s(%s)" % (self.guess_classname(), content) + else: + return "a %s" % (self.guess_classname()) + + def str_content(self): + return "" + + def as_repr_string(self): + return "<%s (a %s) %s>" % (self.repr_classname, self.guess_classname(), self.repr_content()) + + def repr_content(self): + return self.str_content() class W_SmallInteger(W_Object): """Boxed integer value""" @@ -153,7 +183,8 @@ _attrs_ = ['value'] __slots__ = ('value',) # the only allowed slot here _immutable_fields_ = ["value"] - + repr_classname = "W_SmallInteger" + def __init__(self, value): self.value = intmask(value) @@ -191,12 +222,13 @@ # Assume the caller knows what he does, even if int is negative return r_uint(val) - @jit.elidable - def as_repr_string(self): - return "W_SmallInteger(%d)" % self.value + def guess_classname(self): + return "SmallInteger" + + def str_content(self): + return "%d" % self.value def is_same_object(self, other): - # TODO what is correct terminology to say that identity is by value? 
if not isinstance(other, W_SmallInteger): return False return self.value == other.value @@ -219,11 +251,10 @@ """Object with explicit hash (ie all except small ints and floats).""" _attrs_ = ['hash'] - - #XXX maybe this is too extreme, but it's very random + repr_classname = "W_AbstractObjectWithIdentityHash" + hash_generator = rrandom.Random() UNASSIGNED_HASH = sys.maxint - hash = UNASSIGNED_HASH # default value def fillin(self, space, g_self): @@ -247,7 +278,8 @@ class W_LargePositiveInteger1Word(W_AbstractObjectWithIdentityHash): """Large positive integer for exactly 1 word""" _attrs_ = ["value", "_exposed_size"] - + repr_classname = "W_LargePositiveInteger1Word" + def __init__(self, value, size=4): self.value = intmask(value) self._exposed_size = size @@ -264,12 +296,15 @@ def getclass(self, space): return space.w_LargePositiveInteger - + + def guess_classname(self): + return "LargePositiveInteger" + def invariant(self): return isinstance(self.value, int) - def __repr__(self): - return "W_LargePositiveInteger1Word(%d)" % r_uint(self.value) + def str_content(self): + return "%d" % r_uint(self.value) def lshift(self, space, shift): # shift > 0, therefore the highest bit of upperbound is not set, @@ -326,7 +361,8 @@ class W_Float(W_AbstractObjectWithIdentityHash): """Boxed float value.""" _attrs_ = ['value'] - + repr_classname = "W_Float" + def fillin_fromwords(self, space, high, low): from rpython.rlib.rstruct.ieee import float_unpack from rpython.rlib.rarithmetic import r_ulonglong @@ -347,6 +383,12 @@ """Return Float from special objects array.""" return space.w_Float + def guess_classname(self): + return "Float" + + def str_content(self): + return "%f" % self.value + def gethash(self): return intmask(compute_hash(self.value)) // 2 @@ -358,9 +400,6 @@ self.value, w_other.value = w_other.value, self.value W_AbstractObjectWithIdentityHash._become(self, w_other) - def __repr__(self): - return "W_Float(%f)" % self.value - def is_same_object(self, other): if not isinstance(other, W_Float): return False @@ -417,47 +456,42 @@ class W_AbstractObjectWithClassReference(W_AbstractObjectWithIdentityHash): """Objects with arbitrary class (ie not CompiledMethod, SmallInteger or Float).""" - _attrs_ = ['w_class', 'space'] - + _attrs_ = ['w_class'] + repr_classname = "W_AbstractObjectWithClassReference" + def __init__(self, space, w_class): if w_class is not None: # it's None only for testing and space generation assert isinstance(w_class, W_PointersObject) self.w_class = w_class else: self.w_class = None - self.space = space + def repr_content(self): + return 'len=%d %s' % (self.size(), self.str_content()) + + def space(self): + assert self.shadow, "Cannot access space without a shadow!" + return self.shadow.space + def fillin(self, space, g_self): W_AbstractObjectWithIdentityHash.fillin(self, space, g_self) - self.space = space self.w_class = g_self.get_class() def getclass(self, space): return self.w_class - def __str__(self): - if isinstance(self, W_PointersObject) and self.has_shadow() and self.shadow.has_getname: - return self._get_shadow().getname() + def guess_classname(self): + if self.has_class(): + from shadow import ClassShadow + if isinstance(self.w_class.shadow, ClassShadow): + return self.w_class.shadow.name or "???" + else: + # If the shadow of w_class is not yet converted to a ClassShadow, + # we cannot get the classname, unfortunately. No space available. + return "?" 
else: - name = None - if self.has_class(): - name = self.class_shadow(self.space).name - return "a %s" % (name or '?',) - - @jit.elidable - def as_repr_string(self): - return self.as_embellished_string("W_O /w Class", "") - - def as_embellished_string(self, className, additionalInformation): - from rpython.rlib.objectmodel import current_object_addr_as_int - if self.has_class(): - name = self.class_shadow(self.space).name - else: - name = "?" - return "<%s (a %s) %s>" % (className, name, - #hex(current_object_addr_as_int(self)), - additionalInformation) - + return "??" + def invariant(self): from spyvm import shadow return (W_AbstractObjectWithIdentityHash.invariant(self) and @@ -521,11 +555,7 @@ """Common object.""" _attrs_ = ['shadow'] shadow = None # Default value - - def changed(self): - # This is called whenever an instance variable is changed on the receiver. - # Was used with a version variable before. Left here in case it might be usefull in the future. - pass + repr_classname = "W_AbstractPointersObject" @jit.unroll_safe def __init__(self, space, w_class, size): @@ -542,6 +572,21 @@ self.initialize_storage(space, len(pointers)) self.store_all(space, pointers) + def __str__(self): + if self.has_shadow() and self.shadow.provides_getname: + return self._get_shadow().getname() + else: + return W_AbstractObjectWithClassReference.__str__(self) + + def repr_content(self): + shadow_info = "no shadow" + name = "" + if self.has_shadow(): + shadow_info = self.shadow.__repr__() + if self.shadow.provides_getname: + name = self._get_shadow().getname() + return '(%s) len=%d %s' % (shadow_info, self.size(), name) + def fetch_all(self, space): return [self.fetch(space, i) for i in range(self.size())] @@ -578,16 +623,12 @@ def instsize(self, space): return self.class_shadow(space).instsize() - def primsize(self, space): - return self.varsize(space) - def size(self): if not self.shadow: return 0 return self._get_shadow().size() def store_shadow(self, shadow): - #assert self.shadow is None or self.shadow is shadow self.shadow = shadow def _get_shadow(self): @@ -658,22 +699,18 @@ @jit.unroll_safe def clone(self, space): my_pointers = self.fetch_all(space) - w_result = W_PointersObject(self.space, self.getclass(space), len(my_pointers)) + w_result = W_PointersObject(space, self.getclass(space), len(my_pointers)) w_result.store_all(space, my_pointers) return w_result - @jit.elidable - def as_repr_string(self): - return W_AbstractObjectWithClassReference.as_embellished_string(self, - className='W_PointersObject', - additionalInformation='len=%d' % self.size()) - class W_PointersObject(W_AbstractPointersObject): + repr_classname = 'W_PointersObject' def default_storage(self, space, size): from spyvm.shadow import ListStorageShadow return ListStorageShadow(space, self, size) class W_WeakPointersObject(W_AbstractPointersObject): + repr_classname = 'W_WeakPointersObject' def default_storage(self, space, size): from spyvm.shadow import WeakListStorageShadow return WeakListStorageShadow(space, self, size) @@ -681,7 +718,8 @@ class W_BytesObject(W_AbstractObjectWithClassReference): _attrs_ = ['bytes', 'c_bytes', '_size'] _immutable_fields_ = ['_size', 'bytes[*]?'] - + repr_classname = 'W_BytesObject' + def __init__(self, space, w_class, size): W_AbstractObjectWithClassReference.__init__(self, space, w_class) assert isinstance(size, int) @@ -739,10 +777,6 @@ def __str__(self): return self.as_string() - def as_repr_string(self): - return W_AbstractObjectWithClassReference.as_embellished_string(self, - 
className='W_BytesObject', additionalInformation=self.as_string()) - def as_string(self): if self.bytes is not None: return "".join(self.bytes) @@ -780,7 +814,7 @@ def clone(self, space): size = self.size() - w_result = W_BytesObject(self.space, self.getclass(space), size) + w_result = W_BytesObject(space, self.getclass(space), size) if self.bytes is not None: w_result.bytes = list(self.bytes) else: @@ -816,7 +850,8 @@ class W_WordsObject(W_AbstractObjectWithClassReference): _attrs_ = ['words', 'c_words', '_size'] _immutable_fields_ = ['_size'] - + repr_classname = "W_WordsObject" + def __init__(self, space, w_class, size): W_AbstractObjectWithClassReference.__init__(self, space, w_class) self.words = [r_uint(0)] * size @@ -881,17 +916,13 @@ def clone(self, space): size = self.size() - w_result = W_WordsObject(self.space, self.getclass(space), size) + w_result = W_WordsObject(space, self.getclass(space), size) if self.words is not None: w_result.words = list(self.words) else: w_result.words = [r_uint(self.c_words[i]) for i in range(size)] return w_result - def as_repr_string(self): - return W_AbstractObjectWithClassReference.as_embellished_string(self, - className='W_WordsObject', additionalInformation=('len=%d' % self.size())) - def is_array_object(self): return True @@ -936,7 +967,8 @@ class W_DisplayBitmap(W_AbstractObjectWithClassReference): _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] _immutable_fields_ = ['_realsize', 'display', '_depth'] - + repr_classname = "W_DisplayBitmap" + pixelbuffer = None @staticmethod @@ -950,6 +982,9 @@ else: return W_DisplayBitmap(space, w_class, size, depth, display) + def repr_content(self): + return "len=%d depth=%d %s" % (self.size(), self._depth, self.str_content()) + def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') @@ -975,7 +1010,7 @@ return False def clone(self, space): - w_result = W_WordsObject(self.space, self.getclass(space), self._realsize) + w_result = W_WordsObject(space, self.getclass(space), self._realsize) n = 0 while n < self._realsize: w_result.words[n] = self.getword(n) @@ -1005,6 +1040,7 @@ class W_16BitDisplayBitmap(W_DisplayBitmap): + repr_classname = "W_16BitDisplayBitmap" def setword(self, n, word): self._real_depth_buffer[n] = word mask = 0b11111 @@ -1026,6 +1062,7 @@ class W_8BitDisplayBitmap(W_DisplayBitmap): + repr_classname = "W_8BitDisplayBitmap" def setword(self, n, word): self._real_depth_buffer[n] = word self.display.get_pixelbuffer()[n] = r_uint( @@ -1035,9 +1072,9 @@ (word << 24) ) - NATIVE_DEPTH = 8 class W_MappingDisplayBitmap(W_DisplayBitmap): + repr_classname = "W_MappingDisplayBitmap" @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word @@ -1071,6 +1108,7 @@ bytecodes (variable) """ + repr_classname = "W_CompiledMethod" _immutable_fields_ = ["_shadow?"] _attrs_ = ["bytes", "_likely_methodname", "header", "argsize", "primitive", "literals", "tempsize", "literalsize", "islarge", "_shadow"] @@ -1122,11 +1160,11 @@ def getclass(self, space): return space.w_CompiledMethod - def __str__(self): - return self.as_string() - - def as_repr_string(self): - return "" % self.get_identifier_string() + def guess_classname (self): + return "CompiledMethod" + + def str_content(self): + return self.get_identifier_string() def as_string(self, markBytecode=0): from spyvm.interpreter import BYTECODE_TABLE @@ -1139,20 
+1177,20 @@ j += 1 return retval + "---------------------\n" - def get_identifier_string(self): - from spyvm import shadow + def guess_containing_classname(self): + from spyvm.shadow import ClassShadow guessed_classname = None if len(self.literals) > 0: w_candidate = self.literals[-1] if isinstance(w_candidate, W_PointersObject): c_shadow = w_candidate._get_shadow() - if isinstance(c_shadow, shadow.ClassShadow): + if isinstance(c_shadow, ClassShadow): guessed_classname = c_shadow.getname() elif w_candidate.size() >= 2: w_class = w_candidate.fetch(None, 1) if isinstance(w_class, W_PointersObject): d_shadow = w_class._get_shadow() - if isinstance(d_shadow, shadow.ClassShadow): + if isinstance(d_shadow, ClassShadow): guessed_classname = d_shadow.getname() if guessed_classname: class_cutoff = len(guessed_classname) - 6 @@ -1162,7 +1200,10 @@ classname = guessed_classname else: classname = "" - return "%s >> #%s" % (classname, self._likely_methodname) + return classname + + def get_identifier_string(self): + return "%s >> #%s" % (self.guess_containing_classname(), self._likely_methodname) def invariant(self): return (W_Object.invariant(self) and diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -15,7 +15,7 @@ raise PrimitiveFailedError() def assert_valid_index(space, n0, w_obj): - if not 0 <= n0 < w_obj.primsize(space): + if not 0 <= n0 < w_obj.varsize(space): raise PrimitiveFailedError() # return the index, since from here on the annotator knows that # n0 cannot be negative @@ -402,7 +402,7 @@ def func(interp, s_frame, w_obj): if not w_obj.class_shadow(interp.space).isvariable(): raise PrimitiveFailedError() - return interp.space.wrap_int(w_obj.primsize(interp.space)) + return interp.space.wrap_int(w_obj.varsize(interp.space)) @expose_primitive(STRING_AT, unwrap_spec=[object, index1_0]) def func(interp, s_frame, w_obj, n0): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -11,7 +11,7 @@ can be attached at run-time to any Smalltalk object. """ _attrs_ = ['_w_self', 'space'] - has_getname = True + provides_getname = True def __init__(self, space, w_self): self.space = space @@ -41,7 +41,7 @@ class ListStorageShadow(AbstractShadow): _attrs_ = ['storage'] - has_getname = False + provides_getname = False def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -62,7 +62,7 @@ class WeakListStorageShadow(AbstractShadow): _attrs_ = ['storage'] - has_getname = False + provides_getname = False def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -82,7 +82,7 @@ _attrs_ = ['version'] import_from_mixin(version.VersionMixin) version = None - has_getname = True + provides_getname = True def __init__(self, space, w_self): ListStorageShadow.__init__(self, space, w_self, 0) From noreply at buildbot.pypy.org Thu Mar 27 11:53:01 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 27 Mar 2014 11:53:01 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Only ClassShadow provides information for printing their objects. Message-ID: <20140327105301.2D0491D24A1@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r705:f7c638fc25ac Date: 2014-03-27 10:32 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/f7c638fc25ac/ Log: Only ClassShadow provides information for printing their objects. 
diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -11,7 +11,7 @@ can be attached at run-time to any Smalltalk object. """ _attrs_ = ['_w_self', 'space'] - provides_getname = True + provides_getname = False def __init__(self, space, w_self): self.space = space @@ -41,7 +41,6 @@ class ListStorageShadow(AbstractShadow): _attrs_ = ['storage'] - provides_getname = False def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -62,7 +61,6 @@ class WeakListStorageShadow(AbstractShadow): _attrs_ = ['storage'] - provides_getname = False def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -82,7 +80,6 @@ _attrs_ = ['version'] import_from_mixin(version.VersionMixin) version = None - provides_getname = True def __init__(self, space, w_self): ListStorageShadow.__init__(self, space, w_self, 0) @@ -112,6 +109,7 @@ _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", "_s_methoddict", "_s_superclass", "subclass_s"] name = None + provides_getname = True def __init__(self, space, w_self): # fields added here should also be in objspace.py:56ff, 300ff From noreply at buildbot.pypy.org Thu Mar 27 11:53:02 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 27 Mar 2014 11:53:02 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Made _w_self and space fields of Shadows immutable. Message-ID: <20140327105302.4C0F21D24A1@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r706:9ef7a4c22818 Date: 2014-03-27 11:52 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9ef7a4c22818/ Log: Made _w_self and space fields of Shadows immutable. Added some additional __repr__ info to shadows. Fixed finding name of Metaclasses. Added some tests for that. Made printing of classes/metaclasses more consistent with Smalltalk's asString. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -469,10 +469,6 @@ def repr_content(self): return 'len=%d %s' % (self.size(), self.str_content()) - def space(self): - assert self.shadow, "Cannot access space without a shadow!" - return self.shadow.space - def fillin(self, space, g_self): W_AbstractObjectWithIdentityHash.fillin(self, space, g_self) self.w_class = g_self.get_class() @@ -482,15 +478,12 @@ def guess_classname(self): if self.has_class(): - from shadow import ClassShadow - if isinstance(self.w_class.shadow, ClassShadow): - return self.w_class.shadow.name or "???" - else: - # If the shadow of w_class is not yet converted to a ClassShadow, - # we cannot get the classname, unfortunately. No space available. - return "?" + class_shadow = self.class_shadow(self.w_class.space()) + # Three question marks, because it would be highly irregular to have + # an initialized ClassShadow without an initialized name field. + return class_shadow.name or "???" else: - return "??" + return "? (no class)" def invariant(self): from spyvm import shadow @@ -572,6 +565,10 @@ self.initialize_storage(space, len(pointers)) self.store_all(space, pointers) + def space(self): + assert self.shadow, "Cannot access space without a shadow!" 
+ return self.shadow.space + def __str__(self): if self.has_shadow() and self.shadow.provides_getname: return self._get_shadow().getname() @@ -774,8 +771,8 @@ def size(self): return self._size - def __str__(self): - return self.as_string() + def str_content(self): + return "'%s'" % self.as_string() def as_string(self): if self.bytes is not None: diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -46,6 +46,8 @@ name = "w_" + name if not name in self.objtable or not self.objtable[name]: self.objtable[name] = specials[idx] + # XXX this is kind of hacky, but I don't know where else to get Metaclass + self.classtable["w_Metaclass"] = self.w_SmallInteger.w_class.w_class def executable_path(self): return self._executable_path[0] diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -11,7 +11,9 @@ can be attached at run-time to any Smalltalk object. """ _attrs_ = ['_w_self', 'space'] + _immutable_fields_ = ['_w_self', 'space'] provides_getname = False + repr_classname = "AbstractShadow" def __init__(self, space, w_self): self.space = space @@ -19,7 +21,12 @@ def w_self(self): return self._w_self def getname(self): - return repr(self) + raise NotImplementedError("Abstract class") + def __repr__(self): + if self.provides_getname: + return "<%s %s>" % (self.repr_classname, self.getname()) + else: + return "<%s>" % self.repr_classname def fetch(self, n0): raise NotImplementedError("Abstract class") @@ -41,6 +48,7 @@ class ListStorageShadow(AbstractShadow): _attrs_ = ['storage'] + repr_classname = "ListStorageShadow" def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -61,6 +69,7 @@ class WeakListStorageShadow(AbstractShadow): _attrs_ = ['storage'] + repr_classname = "WeakListStorageShadow" def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) @@ -78,6 +87,7 @@ class AbstractCachingShadow(ListStorageShadow): _immutable_fields_ = ['version?'] _attrs_ = ['version'] + repr_classname = "AbstractCachingShadow" import_from_mixin(version.VersionMixin) version = None @@ -108,13 +118,12 @@ _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", "_s_methoddict", "_s_superclass", "subclass_s"] - name = None + name = '??' + _s_superclass = None provides_getname = True + repr_classname = "ClassShadow" def __init__(self, space, w_self): - # fields added here should also be in objspace.py:56ff, 300ff - self.name = '?' - self._s_superclass = None self.subclass_s = {} AbstractCachingShadow.__init__(self, space, w_self) @@ -168,23 +177,19 @@ self.instance_kind = COMPILED_METHOD else: raise ClassShadowError("unknown format %d" % (format,)) - elif n0 == constants.CLASS_NAME_INDEX: - self.store_w_name(w_val) - elif n0 == self.size() - 1: - # In case of Metaclasses, the "instance" class is stored in the last field. - # TODO - only do this if we are sure this is a Metaclass. Check out space.w_Metaclass. 
- if isinstance(w_val, model.W_PointersObject): - cl_shadow = w_val.shadow - if isinstance(cl_shadow, ClassShadow): - # If we're lucky, it's already a class shadow and we can reuse the stored information - if cl_shadow.name: - self.name = "%s class" % cl_shadow.name - elif w_val.size() >= constants.CLASS_NAME_INDEX: - # If not, we have to extract the class name - w_classname = w_val.fetch(self.space, constants.CLASS_NAME_INDEX) - self.store_w_name(w_classname) else: - return + if self._w_self.w_class == self.space.classtable["w_Metaclass"]: + # In case of Metaclasses, the "instance" class is stored in the last field. + if n0 == self.size() - 1 and isinstance(w_val, model.W_PointersObject): + cl_shadow = w_val.as_class_get_shadow(self.space) + self.name = "%s class" % cl_shadow.getname() + else: + return + elif n0 == constants.CLASS_NAME_INDEX: + # In case of regular classes, the name is stored here. + self.store_w_name(w_val) + else: + return # Some of the special info has changed -> Switch version. self.changed() @@ -255,7 +260,7 @@ return self._s_superclass def getname(self): - return "%s class" % (self.name or '?',) + return self.name or '?' # _______________________________________________________________ # Methods for querying the format word, taken from the blue book: @@ -290,9 +295,6 @@ # _______________________________________________________________ # Other Methods - def __repr__(self): - return "" % (self.name or '?',) - @constant_for_version def lookup(self, w_selector): look_in_shadow = self @@ -348,7 +350,8 @@ _immutable_fields_ = ['invalid?', 's_class'] _attrs_ = ['methoddict', 'invalid', 's_class'] - + repr_classname = "MethodDictionaryShadow" + def __init__(self, space, w_self): self.invalid = True self.s_class = None @@ -415,7 +418,8 @@ class AbstractRedirectingShadow(AbstractShadow): _attrs_ = ['_w_self_size'] - + repr_classname = "AbstractRedirectingShadow" + def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) if w_self is not None: @@ -431,7 +435,8 @@ __metaclass__ = extendabletype _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] - + repr_classname = "ContextPartShadow" + _virtualizable_ = [ "_s_sender", "_pc", "_temps_and_stack[*]", "_stack_ptr", @@ -754,7 +759,8 @@ class BlockContextShadow(ContextPartShadow): _attrs_ = ['_w_home', '_initialip', '_eargc'] - + repr_classname = "BlockContextShadow" + def __init__(self, space, w_self=None, w_home=None, argcnt=0, initialip=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) creating_w_self = w_self is None @@ -857,7 +863,8 @@ class MethodContextShadow(ContextPartShadow): _attrs_ = ['w_closure_or_nil', '_w_receiver', '_w_method'] - + repr_classname = "MethodContextShadow" + @jit.unroll_safe def __init__(self, space, w_self=None, s_method=None, w_receiver=None, arguments=None, s_sender=None, closure=None, pc=0): @@ -1020,7 +1027,8 @@ "w_compiledin", "version"] _immutable_fields_ = ["version?", "_w_self"] import_from_mixin(version.VersionMixin) - + repr_classname = "CompiledMethodShadow" + def __init__(self, w_compiledmethod, space): self._w_self = w_compiledmethod self.space = space @@ -1087,6 +1095,7 @@ return self.bytecode[pc] class CachedObjectShadow(AbstractCachingShadow): + repr_classname = "CachedObjectShadow" @elidable_for_version def fetch(self, n0): @@ -1098,6 +1107,7 @@ class ObserveeShadow(ListStorageShadow): _attrs_ = ['dependent'] + repr_classname = "ObserveeShadow" def __init__(self, space, w_self): ListStorageShadow.__init__(self, 
space, w_self, 0) self.dependent = None diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -69,32 +69,48 @@ assert isinstance(w_float_class_name, model.W_BytesObject) assert w_float_class_name.bytes == list("Float") -def test_str_w_object(): +# TODO - many of these test would belong in test_model.py + +def test_str_float(): + assert str(space.wrap_float(3.0)) == "a Float(3.000000)" + +def test_str_string(): + assert str(space.wrap_string('hello')) == "a String('hello')" + +def test_str_float(): + assert str(space.wrap_float(3.0)) == "a Float(3.000000)" + +def test_str_class_object(): w_float_class = get_float_class() w_float_class.as_class_get_shadow(space) - assert str(w_float_class) == "Float class" + assert str(w_float_class) == "Float" + w_float_class.class_shadow(space) - #assert str(w_float_class.getclass(space)) == "a Metaclass" #yes, with article + assert str(w_float_class.getclass(space)) == "Float class" + w_float_class.getclass(space).class_shadow(space) - #assert str(w_float_class.getclass(space).getclass(space)) == "Metaclass class" + assert str(w_float_class.getclass(space).getclass(space)) == "Metaclass" + + w_float_class.getclass(space).getclass(space).class_shadow(space) + assert str(w_float_class.getclass(space).getclass(space).getclass(space)) == "Metaclass class" def test_nil_true_false(): image = get_image() w = image.special(constants.SO_NIL) w.class_shadow(space) - assert str(w) == "a UndefinedObject" #yes, with article + assert str(w) == "a UndefinedObject" w = image.special(constants.SO_FALSE) w.class_shadow(space) - assert str(w) == "a False" #yes, with article + assert str(w) == "a False" w = image.special(constants.SO_TRUE) w.class_shadow(space) - assert str(w) == "a True" #yes, with article + assert str(w) == "a True" def test_scheduler(): image = get_image() w = image.special(constants.SO_SCHEDULERASSOCIATIONPOINTER) w0 = w.fetch(space, 0) - assert str(w0) == "Processor" + assert str(w0) == "a Symbol('Processor')" w0 = w.fetch(space, 1) w0.class_shadow(space) assert str(w0) == "a ProcessorScheduler" @@ -106,15 +122,15 @@ assert str(obj) == expected_name image = get_image() # w = image.special(constants.SO_BITMAP_CLASS) - # assert str(w) == "Bitmap class" - test_classname(constants.SO_SMALLINTEGER_CLASS, "SmallInteger class") - test_classname(constants.SO_ARRAY_CLASS, "Array class") - test_classname(constants.SO_FLOAT_CLASS, "Float class") - test_classname(constants.SO_METHODCONTEXT_CLASS, "MethodContext class") - test_classname(constants.SO_BLOCKCONTEXT_CLASS, "BlockContext class") - test_classname(constants.SO_POINT_CLASS, "Point class") - test_classname(constants.SO_LARGEPOSITIVEINTEGER_CLASS, "LargePositiveInteger class") - test_classname(constants.SO_MESSAGE_CLASS, "Message class") + # assert str(w) == "Bitmap" + test_classname(constants.SO_SMALLINTEGER_CLASS, "SmallInteger") + test_classname(constants.SO_ARRAY_CLASS, "Array") + test_classname(constants.SO_FLOAT_CLASS, "Float") + test_classname(constants.SO_METHODCONTEXT_CLASS, "MethodContext") + test_classname(constants.SO_BLOCKCONTEXT_CLASS, "BlockContext") + test_classname(constants.SO_POINT_CLASS, "Point") + test_classname(constants.SO_LARGEPOSITIVEINTEGER_CLASS, "LargePositiveInteger") + test_classname(constants.SO_MESSAGE_CLASS, "Message") # to be continued @@ -141,8 +157,8 @@ def test_special_objects0(): image = get_image() w = image.special(constants.SO_DOES_NOT_UNDERSTAND) - assert str(w) == 
"doesNotUnderstand:" - assert str(w.getclass(space)) == "Symbol class" # for some strange reason not a symbol + assert str(w) == "a Symbol('doesNotUnderstand:')" + assert str(w.getclass(space)) == "Symbol" # for some strange reason not a symbol """ @@ -328,7 +344,7 @@ assert w_message_cls is interp.space.classtable["w_Message"] assert isinstance(w_message_cls, model.W_PointersObject) s_message_cls = w_message_cls.as_class_get_shadow(interp.space) - assert s_message_cls.getname() == "Message class" + assert s_message_cls.getname() == "Message" w_message = s_message_cls.new() assert isinstance(w_message, model.W_PointersObject) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -10,6 +10,9 @@ from spyvm.tool.analyseimage import create_image from spyvm.interpreter_proxy import VirtualMachine +def print_result(w_result): + # This will also print contents of strings/symbols/numbers + print w_result.as_repr_string().replace('\r', '\n') def _run_benchmark(interp, number, benchmark, arg): from spyvm.plugins.vmdebugging import stop_ui_process @@ -46,8 +49,7 @@ w_result = _run_image(interp) t2 = time.time() if w_result: - if isinstance(w_result, model.W_BytesObject): - print w_result.as_string().replace('\r', '\n') + print_result(w_result) print "took %s seconds" % (t2 - t1) return 0 return -1 @@ -90,10 +92,7 @@ print e.msg return 1 if w_result: - if isinstance(w_result, model.W_BytesObject): - print w_result.as_string().replace('\r', '\n') - else: - print w_result.as_repr_string().replace('\r', '\n') + print_result(w_result) return 0 else: return _run_benchmark(interp, 0, selector, "") From noreply at buildbot.pypy.org Thu Mar 27 13:05:40 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 13:05:40 +0100 (CET) Subject: [pypy-commit] stmgc default: tweaks and some stats Message-ID: <20140327120540.58C6F1C02C1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1100:49dc2a80331b Date: 2014-03-27 13:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/49dc2a80331b/ Log: tweaks and some stats diff --git a/duhton/demo/micro_transactions.duh b/duhton/demo/micro_transactions.duh new file mode 100644 --- /dev/null +++ b/duhton/demo/micro_transactions.duh @@ -0,0 +1,18 @@ + + + +(setq c (container 0)) + + +(defun increment () + ;;(set c (+ (get c) 1)) + ) + + +(setq n 0) +(while (< n 10000000) + (transaction increment) + (setq n (+ n 1)) + ) + +(run-transactions) diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -13,6 +13,10 @@ #define ABORT_GIL_LOCKED 1 +static __thread int gil_transactions = 0; +static __thread int htm_transactions = 0; + + #define smp_spinloop() asm volatile ("pause":::"memory") static void acquire_gil(stm_thread_local_t *tl) { @@ -75,10 +79,12 @@ if (mutex_locked(&_stm_gil)) { gil_retry_counter--; if (gil_retry_counter > 0) { - if (spin_and_acquire_gil(tl)) + if (spin_and_acquire_gil(tl)) { return; - else + } else { + smp_spinloop(); goto transaction_retry; + } } acquire_gil(tl); } else if (is_persistent(status)) { @@ -86,8 +92,10 @@ } else { /* transient abort */ transient_retry_counter--; - if (transient_retry_counter > 0) + if (transient_retry_counter > 0) { + smp_spinloop(); goto transaction_retry; + } acquire_gil(tl); } @@ -104,14 +112,23 @@ if (mutex_locked(&_stm_gil)) { assert(!xtest()); if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); - fprintf(stderr, "G"); + gil_transactions++; + //fprintf(stderr, 
"G"); } else { xend(); - fprintf(stderr, "H"); + htm_transactions++; + //fprintf(stderr, "H"); } } +void stm_unregister_thread_local(stm_thread_local_t *tl) { + fprintf(stderr, + "in %p\ngil_transactions: %d\nhtm_transactions: %d\nratio: %f\n", + tl, gil_transactions, htm_transactions, + (float)gil_transactions / (float)htm_transactions); + free(tl->shadowstack_base); +} diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -77,9 +77,7 @@ tl->shadowstack = tl->shadowstack_base; tl->last_abort__bytes_in_nursery = 0; } -inline static void stm_unregister_thread_local(stm_thread_local_t *tl) { - free(tl->shadowstack_base); -} +void stm_unregister_thread_local(stm_thread_local_t *tl); extern pthread_mutex_t _stm_gil; From noreply at buildbot.pypy.org Thu Mar 27 14:08:05 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 14:08:05 +0100 (CET) Subject: [pypy-commit] stmgc default: add htm_transaction_info Message-ID: <20140327130805.5F4201D2A9B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1101:f0d999963700 Date: 2014-03-27 13:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/f0d999963700/ Log: add htm_transaction_info diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -16,12 +16,14 @@ static __thread int gil_transactions = 0; static __thread int htm_transactions = 0; +__thread struct htm_transaction_info_s _htm_info; #define smp_spinloop() asm volatile ("pause":::"memory") static void acquire_gil(stm_thread_local_t *tl) { if (pthread_mutex_lock(&_stm_gil) == 0) { _stm_tloc = tl; + _htm_info.use_gil = 1; return; } abort(); @@ -53,6 +55,11 @@ void stm_start_inevitable_transaction(stm_thread_local_t *tl) { /* set_transaction_length(pc) */ + /* fprintf(stderr, "previous tr: retry: %d gil: %d\n", */ + /* _htm_info.retry_counter, _htm_info.use_gil); */ + _htm_info.retry_counter = 0; + _htm_info.use_gil = 0; + if (mutex_locked(&_stm_gil)) { if (spin_and_acquire_gil(tl)) return; @@ -91,6 +98,7 @@ acquire_gil(tl); } else { /* transient abort */ + _htm_info.retry_counter++; transient_retry_counter--; if (transient_retry_counter > 0) { smp_spinloop(); diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -13,6 +13,14 @@ #define STM_NB_SEGMENTS 4 +#define HTM_INFO_AVAILABLE 1 +struct htm_transaction_info_s { + int retry_counter; /* only counting transient aborts of HTM */ + int use_gil; /* in GIL mode? 
0=HTM */ +}; +extern __thread struct htm_transaction_info_s _htm_info; + + typedef struct { /* empty */ } stm_jmpbuf_t; typedef struct object_s { @@ -29,6 +37,7 @@ extern stm_thread_local_t *_stm_tloc; extern char *_stm_nursery_current, *_stm_nursery_end; + struct stm_segment_info_s { stm_jmpbuf_t *jmpbuf_ptr; /* compat only -- always NULL */ char *nursery_current; /* compat only -- always NULL */ From noreply at buildbot.pypy.org Thu Mar 27 14:08:06 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 14:08:06 +0100 (CET) Subject: [pypy-commit] stmgc default: use a thread-local malloc for old objects Message-ID: <20140327130806.9546F1D2A9B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1102:f8e743eb8011 Date: 2014-03-27 14:08 +0100 http://bitbucket.org/pypy/stmgc/changeset/f8e743eb8011/ Log: use a thread-local malloc for old objects diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -138,8 +138,24 @@ free(tl->shadowstack_base); } +/************************************************************/ +/* some simple thread-local malloc: */ +#define MAX_MALLOC (1000 * 1024 * 1024) +static __thread char* _malloc_area_base = NULL; +static __thread char* _malloc_area_current = NULL; +void* tl_malloc(size_t size) { + if (_malloc_area_base == NULL) { + _malloc_area_base = malloc(MAX_MALLOC); + _malloc_area_current = _malloc_area_base; + } + void* res = _malloc_area_current; + _malloc_area_current += size; + if (_malloc_area_current - _malloc_area_base > MAX_MALLOC) + abort(); + return res; +} /************************************************************/ @@ -272,7 +288,7 @@ object_t *_stm_allocate_old(ssize_t size) { - char *p = malloc(size); + char *p = tl_malloc(size); assert(p); memset(p, 0, size); ((object_t *)p)->gil_flags = _STM_GCFLAG_WRITE_BARRIER; @@ -281,7 +297,7 @@ object_t *_stm_allocate_external(ssize_t size) { - char *p = malloc(size); + char *p = tl_malloc(size); assert(p); memset(p, 0, size); _stm_write_slowpath((object_t *)p); @@ -335,7 +351,7 @@ */ size_t size = stmcb_size_rounded_up(obj); - nobj = malloc(size); + nobj = tl_malloc(size); assert(nobj); /* Copy the object */ From noreply at buildbot.pypy.org Thu Mar 27 14:23:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 14:23:21 +0100 (CET) Subject: [pypy-commit] stmgc default: The nursery page's read markers are never read, but must still be writeable. We'd Message-ID: <20140327132321.BC3DB1C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1103:785dafe38e34 Date: 2014-03-27 14:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/785dafe38e34/ Log: The nursery page's read markers are never read, but must still be writeable. We'd like to map the pages to a general "trash page"; missing one, we remap all the pages over to the same one. We still keep one page *per segment* to avoid cross-CPU cache conflicts. 
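	As a standalone illustration of the trick (not stmgc code; it assumes Linux with
	4096-byte pages and a kernel that still accepts remap_file_pages(), a call that is
	deprecated since 3.16 and emulated since 4.0):

	    /* sketch only: alias pages 1..3 of a shared anonymous mapping onto
	       page 0, similar to how the patch below aliases every nursery
	       read-marker page of a segment onto a single page */
	    #define _GNU_SOURCE
	    #include <sys/mman.h>
	    #include <assert.h>
	    #include <stdio.h>
	    #include <string.h>

	    int main(void)
	    {
	        size_t page = 4096, npages = 4;     /* assumes 4096-byte pages */
	        char *base = mmap(NULL, npages * page, PROT_READ | PROT_WRITE,
	                          MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	        assert(base != MAP_FAILED);
	        for (size_t j = 1; j < npages; j++) {
	            /* prot=0 keeps the current protection; pgoff=0 makes the page
	               point back at offset 0 of the shared object */
	            int err = remap_file_pages(base + j * page, page, 0, 0, 0);
	            assert(err == 0);
	        }
	        strcpy(base, "hi");
	        printf("%s %s\n", base, base + 3 * page);   /* both print "hi" */
	        return 0;
	    }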
diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -28,6 +28,8 @@ #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define OLD_RM_START ((END_NURSERY_PAGE * 4096UL) >> 4) +#define FIRST_OLD_RM_PAGE (OLD_RM_START / 4096UL) #define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -169,25 +169,22 @@ increment_total_allocated(total); } +static void pages_setup_readmarkers_for_nursery(void) +{ + /* The nursery page's read markers are never read, but must still + be writeable. We'd like to map the pages to a general "trash + page"; missing one, we remap all the pages over to the same one. + We still keep one page *per segment* to avoid cross-CPU cache + conflicts. + */ + long i, j; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); -#if 0 -static bool is_fully_in_shared_pages(object_t *obj) -{ - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; - - if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) - return (flag_page_private[first_page] == SHARED_PAGE); - - ssize_t obj_size = stmcb_size_rounded_up( - (struct object_s *)REAL_ADDRESS(stm_object_pages, obj)); - - uintptr_t last_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; - - do { - if (flag_page_private[first_page++] != SHARED_PAGE) - return false; - } while (first_page <= last_page); - - return true; + for (j = FIRST_READMARKER_PAGE + 1; j < FIRST_OLD_RM_PAGE; j++) { + remap_file_pages(segment_base + 4096 * j, 4096, 0, + i * NB_PAGES + FIRST_READMARKER_PAGE, 0); + /* errors here ignored */ + } + } } -#endif diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -39,6 +39,7 @@ static void page_privatize(uintptr_t pagenum); static void page_reshare(uintptr_t pagenum); static void _page_do_reshare(long segnum, uintptr_t pagenum); +static void pages_setup_readmarkers_for_nursery(void); /* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ static void mutex_pages_lock(void); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -36,6 +36,7 @@ (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); } + pages_setup_readmarkers_for_nursery(); } void stm_setup(void) From noreply at buildbot.pypy.org Thu Mar 27 14:23:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 14:23:22 +0100 (CET) Subject: [pypy-commit] stmgc default: Failed to measure any difference in performance, keeping it around for now as it shouldn't hurt Message-ID: <20140327132322.EE2671C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1104:d9f6900ea6ab Date: 2014-03-27 14:23 +0100 http://bitbucket.org/pypy/stmgc/changeset/d9f6900ea6ab/ Log: Failed to measure any difference in performance, keeping it around for now as it shouldn't hurt diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -176,6 +176,8 @@ page"; missing one, we remap all the pages over to the same one. We still keep one page *per segment* to avoid cross-CPU cache conflicts. 
+ + (XXX no performance difference measured so far) */ long i, j; for (i = 1; i <= NB_SEGMENTS; i++) { From noreply at buildbot.pypy.org Thu Mar 27 14:23:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 14:23:24 +0100 (CET) Subject: [pypy-commit] stmgc default: merge heads Message-ID: <20140327132324.2492A1C02C1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1105:eb3d1810eec8 Date: 2014-03-27 14:23 +0100 http://bitbucket.org/pypy/stmgc/changeset/eb3d1810eec8/ Log: merge heads diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -16,12 +16,14 @@ static __thread int gil_transactions = 0; static __thread int htm_transactions = 0; +__thread struct htm_transaction_info_s _htm_info; #define smp_spinloop() asm volatile ("pause":::"memory") static void acquire_gil(stm_thread_local_t *tl) { if (pthread_mutex_lock(&_stm_gil) == 0) { _stm_tloc = tl; + _htm_info.use_gil = 1; return; } abort(); @@ -53,6 +55,11 @@ void stm_start_inevitable_transaction(stm_thread_local_t *tl) { /* set_transaction_length(pc) */ + /* fprintf(stderr, "previous tr: retry: %d gil: %d\n", */ + /* _htm_info.retry_counter, _htm_info.use_gil); */ + _htm_info.retry_counter = 0; + _htm_info.use_gil = 0; + if (mutex_locked(&_stm_gil)) { if (spin_and_acquire_gil(tl)) return; @@ -91,6 +98,7 @@ acquire_gil(tl); } else { /* transient abort */ + _htm_info.retry_counter++; transient_retry_counter--; if (transient_retry_counter > 0) { smp_spinloop(); @@ -130,8 +138,24 @@ free(tl->shadowstack_base); } +/************************************************************/ +/* some simple thread-local malloc: */ +#define MAX_MALLOC (1000 * 1024 * 1024) +static __thread char* _malloc_area_base = NULL; +static __thread char* _malloc_area_current = NULL; +void* tl_malloc(size_t size) { + if (_malloc_area_base == NULL) { + _malloc_area_base = malloc(MAX_MALLOC); + _malloc_area_current = _malloc_area_base; + } + void* res = _malloc_area_current; + _malloc_area_current += size; + if (_malloc_area_current - _malloc_area_base > MAX_MALLOC) + abort(); + return res; +} /************************************************************/ @@ -264,7 +288,7 @@ object_t *_stm_allocate_old(ssize_t size) { - char *p = malloc(size); + char *p = tl_malloc(size); assert(p); memset(p, 0, size); ((object_t *)p)->gil_flags = _STM_GCFLAG_WRITE_BARRIER; @@ -273,7 +297,7 @@ object_t *_stm_allocate_external(ssize_t size) { - char *p = malloc(size); + char *p = tl_malloc(size); assert(p); memset(p, 0, size); _stm_write_slowpath((object_t *)p); @@ -327,7 +351,7 @@ */ size_t size = stmcb_size_rounded_up(obj); - nobj = malloc(size); + nobj = tl_malloc(size); assert(nobj); /* Copy the object */ diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -13,6 +13,14 @@ #define STM_NB_SEGMENTS 4 +#define HTM_INFO_AVAILABLE 1 +struct htm_transaction_info_s { + int retry_counter; /* only counting transient aborts of HTM */ + int use_gil; /* in GIL mode? 
0=HTM */ +}; +extern __thread struct htm_transaction_info_s _htm_info; + + typedef struct { /* empty */ } stm_jmpbuf_t; typedef struct object_s { @@ -29,6 +37,7 @@ extern stm_thread_local_t *_stm_tloc; extern char *_stm_nursery_current, *_stm_nursery_end; + struct stm_segment_info_s { stm_jmpbuf_t *jmpbuf_ptr; /* compat only -- always NULL */ char *nursery_current; /* compat only -- always NULL */ From noreply at buildbot.pypy.org Thu Mar 27 14:48:17 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 27 Mar 2014 14:48:17 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Added and fixed test_strategies.py, added empty classes for special strategy storage. Message-ID: <20140327134817.30F1D1C02C1@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r707:b0835c943e0b Date: 2014-03-27 12:08 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/b0835c943e0b/ Log: Added and fixed test_strategies.py, added empty classes for special strategy storage. diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -46,6 +46,20 @@ for i in range(self.size()): self.copy_field_from(i, other_shadow) +class AllNilStorageShadow(AbstractShadow): + def fetch(self, n0): + raise NotImplementedError("Abstract class") + def store(self, n0, w_value): + raise NotImplementedError("Abstract class") + def size(self): + raise NotImplementedError("Abstract class") + +class SmallIntegerOrNilStorageShadow: + pass + +class FloatOrNilStorageShadow: + pass + class ListStorageShadow(AbstractShadow): _attrs_ = ['storage'] repr_classname = "ListStorageShadow" diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py new file mode 100644 --- /dev/null +++ b/spyvm/test/test_strategies.py @@ -0,0 +1,195 @@ +import py +from spyvm import wrapper, model, interpreter, shadow +from spyvm.error import WrapperException, FatalError +from .util import read_image, copy_to_module, cleanup_module + +def setup_module(): + space, interp, _, _ = read_image('bootstrapped.image') + class_Array = space.classtable["w_Array"] + w_nil = space.w_nil + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) + +def arr(size): + return model.W_PointersObject(space, class_Array, size) + +def list_arr(size): + a = arr(size) + a.store(space, 0, arr(1)) + return a + +def int_arr(size): + a = arr(size) + a.store(space, 0, space.wrap_int(12)) + return a + +def float_arr(size): + a = arr(size) + a.store(space, 0, space.wrap_float(1.2)) + return a + +def check_arr(arr, expected): + for i in range(arr.size()): + w_val = arr.fetch(space, i) + if expected[i] == w_nil: + assert w_val == w_nil + elif isinstance(expected[i], int): + assert isinstance(w_val, model.W_SmallInteger) + assert space.unwrap_int(w_val) == expected[i] + elif isinstance(expected[i], float): + assert isinstance(w_val, model.W_Float) + assert space.unwrap_float(w_val) == expected[i] + else: + assert False, "Unexpected array of expected values." 
+ +# ====== AllNil StorageShadow + +def test_EmptyArray(): + a = arr(5) + assert isinstance(a.shadow, shadow.AllNilStorageShadow) + +def test_StoreNil(): + a = arr(5) + a.store(space, 0, w_nil) + a.store(space, 4, w_nil) + assert isinstance(a.shadow, shadow.AllNilStorageShadow) + +def test_FetchNil(): + a = arr(5) + assert a.fetch(space, 2) is w_nil + +def test_AllNilSize(): + a = arr(5) + assert a.size() == 5 + +# ====== List StorageShadow + +def test_AllNil_to_List(): + a = list_arr(5) + assert isinstance(a.shadow, shadow.ListStorageShadow) + +def test_List_store(): + a = list_arr(5) + a.store(space, 1, arr(1)) + a.store(space, 4, arr(1)) + assert isinstance(a.shadow, shadow.ListStorageShadow) + +def test_List_fetch(): + a = list_arr(5) + assert a.fetch(space, 0).getclass(space) == class_Array + assert a.fetch(space, 4) == w_nil + +def test_List_size(): + a = list_arr(5) + a.store(space, 1, arr(1)) + assert a.size() == 5 + +# ====== SmallIntegerOrNil StorageShadow + +def test_AllNil_to_Int(): + a = int_arr(5) + assert isinstance(a.shadow, shadow.SmallIntegerOrNilStorageShadow) + check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) + +def test_SmallInt_store(): + a = int_arr(5) + a.store(space, 1, space.wrap_int(20)) + a.store(space, 2, space.wrap_int(20)) + assert isinstance(a.shadow, shadow.SmallIntegerOrNilStorageShadow) + check_arr(a, [12, 20, 20, w_nil, w_nil]) + +def test_SmallInt_store_nil_to_nil(): + a = int_arr(5) + a.store(space, 1, w_nil) + check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) + +def test_SmallInt_overwrite(): + a = int_arr(5) + a.store(space, 1, space.wrap_int(1)) + a.store(space, 3, space.wrap_int(2)) + a.store(space, 0, space.wrap_int(100)) + a.store(space, 1, space.wrap_int(200)) + a.store(space, 3, space.wrap_int(300)) + check_arr(a, [100, 200, w_nil, 300, w_nil]) + +def test_SmallInt_delete(): + a = int_arr(5) + a.store(space, 1, space.wrap_int(1)) + a.store(space, 1, w_nil) + check_arr(a, [12, w_nil, w_nil, w_nil, w_nil]) + +def test_SmallInt_to_List(): + a = int_arr(5) + a.store(space, 1, arr(1)) + assert isinstance(a.shadow, shadow.ListStorageShadow) + +def test_SmallInt_store_Float_to_List(): + a = int_arr(5) + a.store(space, 1, space.wrap_float(2.2)) + assert isinstance(a.shadow, shadow.ListStorageShadow) + check_arr(a, [12, 2.2, w_nil, w_nil, w_nil]) + +# ====== FloatOrNil StorageShadow + +def test_AllNil_to_Float(): + a = float_arr(5) + assert isinstance(a.shadow, shadow.FloatOrNilStorageShadow) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_store(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(20.0)) + a.store(space, 2, space.wrap_float(20.0)) + assert isinstance(a.shadow, shadow.FloatOrNilStorageShadow) + check_arr(a, [1.2, 20.0, 20.0, w_nil, w_nil]) + +def test_Float_store_nil_to_nil(): + a = float_arr(5) + a.store(space, 1, w_nil) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_overwrite(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(1.0)) + a.store(space, 3, space.wrap_float(2.0)) + a.store(space, 0, space.wrap_float(100.0)) + a.store(space, 1, space.wrap_float(200.0)) + a.store(space, 3, space.wrap_float(300.0)) + check_arr(a, [100.0, 200.0, w_nil, 300.0, w_nil]) + +def test_Float_delete(): + a = float_arr(5) + a.store(space, 1, space.wrap_float(1.0)) + a.store(space, 1, w_nil) + check_arr(a, [1.2, w_nil, w_nil, w_nil, w_nil]) + +def test_Float_to_List(): + a = float_arr(5) + a.store(space, 1, arr(1)) + assert isinstance(a.shadow, shadow.ListStorageShadow) + +def 
test_Float_store_SmallInt_to_List(): + a = float_arr(5) + a.store(space, 1, space.wrap_int(2)) + assert isinstance(a.shadow, shadow.ListStorageShadow) + check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) + +def test_statistics(): + stats = model.StrategyStatistics() + stats.stat_operation("B", "old", "new", 3) + stats.stat_operation("B", "old", "new", 4) + stats.stat_operation("B", "old2", "new2", 20) + stats.stat_operation("B", "old", "new", 5) + stats.stat_operation("A", "old", "new", 1) + stats.stat_operation("A", "old", "new", 2) + stats.stat_operation("C", "old", "new", 10) + stats.stat_operation("C", "old", "new", 11) + keys = stats.sorted_keys() + assert keys == [ ("A", "old", "new"), ("B", "old", "new"), ("B", "old2", "new2"), ("C", "old", "new") ] + assert stats.stats[keys[0]] == [1, 2] + assert stats.stats[keys[1]] == [3, 4, 5] + assert stats.stats[keys[2]] == [20] + assert stats.stats[keys[3]] == [10, 11] + \ No newline at end of file From noreply at buildbot.pypy.org Thu Mar 27 14:48:18 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 27 Mar 2014 14:48:18 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Added code for specialized storage strategies. Message-ID: <20140327134818.6C5461C02C1@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r708:969853688ab7 Date: 2014-03-27 14:47 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/969853688ab7/ Log: Added code for specialized storage strategies. Fillin-sequence has to be done recursively, in order for the specialized storage strategies to work. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -471,6 +471,8 @@ def fillin(self, space, g_self): W_AbstractObjectWithIdentityHash.fillin(self, space, g_self) + # The class data will be initialized lazily, after the initial fillin-sequence is over. + # Don't construct the ClassShadow here, yet! self.w_class = g_self.get_class() def getclass(self, space): @@ -547,7 +549,7 @@ class W_AbstractPointersObject(W_AbstractObjectWithClassReference): """Common object.""" _attrs_ = ['shadow'] - shadow = None # Default value + shadow = None repr_classname = "W_AbstractPointersObject" @jit.unroll_safe @@ -555,16 +557,34 @@ """Create new object with size = fixed + variable size.""" W_AbstractObjectWithClassReference.__init__(self, space, w_class) self.initialize_storage(space, size) - + def initialize_storage(self, space, size): - self.store_shadow(self.default_storage(space, size)) + self.store_shadow(self.empty_storage(space, size)) def fillin(self, space, g_self): W_AbstractObjectWithClassReference.fillin(self, space, g_self) + # Recursive fillin required to enable specialized storage strategies. + for g_obj in g_self.pointers: + g_obj.fillin(space) pointers = g_self.get_pointers() - self.initialize_storage(space, len(pointers)) + self.store_shadow(self.storage_for_list(space, pointers)) self.store_all(space, pointers) + def empty_storage(self, space, size): + raise NotImplementedError() + def storage_for_list(self, space, vars): + raise NotImplementedError() + + def switch_shadow(self, new_shadow): + if self.shadow is not None: + new_shadow.copy_from(self.shadow) + self.store_shadow(new_shadow) + new_shadow.attach_shadow() + + def store_with_new_storage(self, new_storage, n0, w_val): + self.switch_shadow(new_storage(self.space(), self, self.size())) + self.store(self.space(), n0, w_val) + def space(self): assert self.shadow, "Cannot access space without a shadow!" 
return self.shadow.space @@ -637,10 +657,7 @@ shadow = old_shadow if not isinstance(old_shadow, TheClass): shadow = TheClass(space, self) - if old_shadow is not None: - shadow.copy_from(old_shadow) - self.store_shadow(shadow) - shadow.attach_shadow() + self.switch_shadow(shadow) return shadow def get_shadow(self, space): @@ -702,15 +719,27 @@ class W_PointersObject(W_AbstractPointersObject): repr_classname = 'W_PointersObject' - def default_storage(self, space, size): - from spyvm.shadow import ListStorageShadow - return ListStorageShadow(space, self, size) + + def empty_storage(self, space, size): + # A newly allocated object contains only nils. + from spyvm.shadow import AllNilStorageShadow + return AllNilStorageShadow(space, self, size) + + def storage_for_list(self, space, vars): + #if not self.class_shadow(space).isvariable(): + # return ListStorageShadow(space, self, len(vars)) + from spyvm.shadow import find_storage_for_objects + return find_storage_for_objects(space, vars)(space, self, len(vars)) class W_WeakPointersObject(W_AbstractPointersObject): repr_classname = 'W_WeakPointersObject' - def default_storage(self, space, size): + + def empty_storage(self, space, size): from spyvm.shadow import WeakListStorageShadow return WeakListStorageShadow(space, self, size) + def storage_for_list(self, space, vars): + from spyvm.shadow import WeakListStorageShadow + return WeakListStorageShadow(space, self, len(vars)) class W_BytesObject(W_AbstractObjectWithClassReference): _attrs_ = ['bytes', 'c_bytes', '_size'] diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -2,9 +2,11 @@ from spyvm import model, constants, error, wrapper, version from spyvm.version import elidable_for_version, constant_for_version from rpython.tool.pairtype import extendabletype -from rpython.rlib import rarithmetic, jit +from rpython.rlib import rarithmetic, jit, longlong2float from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import make_sure_not_resized +from rpython.rlib.rstruct.runpack import runpack +from rpython.rtyper.lltypesystem import rffi, lltype class AbstractShadow(object): """A shadow is an optional extra bit of information that @@ -46,20 +48,145 @@ for i in range(self.size()): self.copy_field_from(i, other_shadow) -class AllNilStorageShadow(AbstractShadow): +class AbstractStorageShadow(AbstractShadow): + repr_classname = "AbstractStorageShadow" + def store(self, n0, w_val): + if self.can_contain(w_val): + return self.do_store(n0, w_val) + new_storage = self.generelized_strategy_for(w_val) + return self._w_self.store_with_new_storage(new_storage, n0, w_val) + def can_contain(self, w_val): + return self.static_can_contain(self.space, w_val) + def do_store(self, n0, w_val): + raise NotImplemtedError() + def generelized_strategy_for(self, w_val): + raise NotImplemtedError() + +class AllNilStorageShadow(AbstractStorageShadow): + repr_classname = "AllNilStorageShadow" + _attrs_ = ['_size'] + _immutable_fields_ = ['_size'] + def __init__(self, space, w_self, size): + AbstractShadow.__init__(self, space, w_self) + self._size = size def fetch(self, n0): - raise NotImplementedError("Abstract class") - def store(self, n0, w_value): - raise NotImplementedError("Abstract class") + if n0 >= self._size: + raise IndexError + return self.space.w_nil + def do_store(self, n0, w_value): + pass def size(self): - raise NotImplementedError("Abstract class") + return self._size + def generelized_strategy_for(self, w_val): + return 
find_storage_for_objects(self.space, [w_val]) + @staticmethod + def static_can_contain(space, w_val): + return w_val == space.w_nil -class SmallIntegerOrNilStorageShadow: - pass +class AbstractValueOrNilStorageMixin(object): + # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class + storage = [] + _attrs_ = ['storage'] + + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self) + self.storage = [self.nil_value] * size + + def size(self): + return len(self.storage) + + def generelized_strategy_for(self, w_val): + return ListStorageShadow + + def fetch(self, n0): + val = self.storage[n0] + if self.is_nil_value(val): + return self.space.w_nil + else: + return self.wrap(self.space, val) + + def do_store(self, n0, w_val): + store = self.storage + if w_val == self.space.w_nil: + store[n0] = self.nil_value + else: + store[n0] = self.unwrap(self.space, w_val) -class FloatOrNilStorageShadow: - pass +# This is to avoid code duplication +def _value_or_nil_can_handle(cls, space, w_val): + return w_val == space.w_nil or \ + (isinstance(w_val, cls.wrapper_class) \ + and not cls.is_nil_value(cls.unwrap(space, w_val))) +class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): + repr_classname = "SmallIntegerOrNilStorageShadow" + nil_value = constants.MAXINT + wrapper_class = model.W_SmallInteger + import_from_mixin(AbstractValueOrNilStorageMixin) + + @staticmethod + def static_can_contain(space, w_val): + return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) + @staticmethod + def is_nil_value(val): + return val == SmallIntegerOrNilStorageShadow.nil_value + @staticmethod + def wrap(space, val): + return space.wrap_int(val) + @staticmethod + def unwrap(space, w_val): + return space.unwrap_int(w_val) + +class FloatOrNilStorageShadow(AbstractStorageShadow): + repr_classname = "FloatOrNilStorageShadow" + # TODO -- use another value... something like max_float? 
+ nil_value = runpack("d", "\x10\x00\x00\x00\x00\x00\xf8\x7f") + nil_value_longlong = longlong2float.float2longlong(nil_value) + wrapper_class = model.W_Float + import_from_mixin(AbstractValueOrNilStorageMixin) + + @staticmethod + def static_can_contain(space, w_val): + return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) + @staticmethod + def is_nil_value(val): + return longlong2float.float2longlong(val) == FloatOrNilStorageShadow.nil_value_longlong + @staticmethod + def wrap(space, val): + return space.wrap_float(val) + @staticmethod + def unwrap(space, w_val): + return space.unwrap_float(w_val) + +def find_storage_for_objects(space, vars): + specialized_strategies = 3 + all_nil_can_handle = True + small_int_can_handle = True + float_can_handle = True + for w_obj in vars: + if all_nil_can_handle and not AllNilStorageShadow.static_can_contain(space, w_obj): + all_nil_can_handle = False + specialized_strategies = specialized_strategies - 1 + if small_int_can_handle and not SmallIntegerOrNilStorageShadow.static_can_contain(space, w_obj): + small_int_can_handle = False + specialized_strategies = specialized_strategies - 1 + if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): + float_can_handle = False + specialized_strategies = specialized_strategies - 1 + + if specialized_strategies <= 0: + return ListStorageShadow + + if all_nil_can_handle: + return AllNilStorageShadow + if small_int_can_handle: + return SmallIntegerOrNilStorageShadow + if float_can_handle: + return FloatOrNilStorageShadow + + # If this happens, please look for a bug in the code above. + assert False, "No strategy could be found for list..." + class ListStorageShadow(AbstractShadow): _attrs_ = ['storage'] repr_classname = "ListStorageShadow" diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -22,7 +22,6 @@ class MockFrame(model.W_PointersObject): def __init__(self, space, stack): - self.space = space size = 6 + len(stack) + 6 self.initialize_storage(space, size) self.store_all(space, [None] * 6 + stack + [space.w_nil] * 6) From noreply at buildbot.pypy.org Thu Mar 27 14:57:06 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 14:57:06 +0100 (CET) Subject: [pypy-commit] stmgc default: fix obvious HTM disabler Message-ID: <20140327135706.79D871C14E7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1106:af451d3e606c Date: 2014-03-27 14:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/af451d3e606c/ Log: fix obvious HTM disabler diff --git a/htm-c7/htm.h b/htm-c7/htm.h --- a/htm-c7/htm.h +++ b/htm-c7/htm.h @@ -18,6 +18,7 @@ #define XBEGIN_XABORT_ARG(x) (((x) >> 24) & 0xFF) static __thread char buf[128]; +__attribute__((unused)) static char* xbegin_status(int status) { if (status == XBEGIN_OK) diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -130,13 +130,6 @@ } -void stm_unregister_thread_local(stm_thread_local_t *tl) { - fprintf(stderr, - "in %p\ngil_transactions: %d\nhtm_transactions: %d\nratio: %f\n", - tl, gil_transactions, htm_transactions, - (float)gil_transactions / (float)htm_transactions); - free(tl->shadowstack_base); -} /************************************************************/ /* some simple thread-local malloc: */ @@ -266,20 +259,40 @@ #define GCFLAG_WRITE_BARRIER _STM_GCFLAG_WRITE_BARRIER -static struct list_s *objects_pointing_to_nursery; -static struct list_s *young_weakrefs; 
+static __thread struct list_s *objects_pointing_to_nursery; +static __thread struct list_s *young_weakrefs; void stm_setup(void) { - objects_pointing_to_nursery = list_create(); - young_weakrefs = list_create(); } void stm_teardown(void) { +} + +void stm_register_thread_local(stm_thread_local_t *tl) { + objects_pointing_to_nursery = list_create(); + young_weakrefs = list_create(); + + tl->thread_local_obj = NULL; + tl->shadowstack_base = (object_t **)malloc(768*1024); + assert(tl->shadowstack_base); + tl->shadowstack = tl->shadowstack_base; + tl->last_abort__bytes_in_nursery = 0; +} + +void stm_unregister_thread_local(stm_thread_local_t *tl) { + fprintf(stderr, + "in %p\ngil_transactions: %d\nhtm_transactions: %d\nratio: %f\n", + tl, gil_transactions, htm_transactions, + (float)gil_transactions / (float)htm_transactions); + free(tl->shadowstack_base); + list_free(objects_pointing_to_nursery); + list_free(young_weakrefs); } + void _stm_write_slowpath(object_t *obj) { obj->gil_flags &= ~GCFLAG_WRITE_BARRIER; diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -79,13 +79,7 @@ return (object_t *)p; } -inline static void stm_register_thread_local(stm_thread_local_t *tl) { - tl->thread_local_obj = NULL; - tl->shadowstack_base = (object_t **)malloc(768*1024); - assert(tl->shadowstack_base); - tl->shadowstack = tl->shadowstack_base; - tl->last_abort__bytes_in_nursery = 0; -} +void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); extern pthread_mutex_t _stm_gil; From noreply at buildbot.pypy.org Thu Mar 27 15:21:13 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 15:21:13 +0100 (CET) Subject: [pypy-commit] stmgc default: tweaks Message-ID: <20140327142113.4AA5F1D24A1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1107:422ecbbbfa4e Date: 2014-03-27 15:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/422ecbbbfa4e/ Log: tweaks diff --git a/duhton/demo/micro_transactions.duh b/duhton/demo/micro_transactions.duh --- a/duhton/demo/micro_transactions.duh +++ b/duhton/demo/micro_transactions.duh @@ -1,11 +1,13 @@ -(setq c (container 0)) +;;(setq c (container 0)) (defun increment () ;;(set c (+ (get c) 1)) + (setq c 0) + (setq c (+ c 1)) ) diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -30,7 +30,7 @@ } static int spin_and_acquire_gil(stm_thread_local_t *tl) { - int n = 100; + int n = 5; while ((n --> 0) && mutex_locked(&_stm_gil)) { smp_spinloop(); } @@ -65,10 +65,10 @@ return; } - volatile int status; - volatile int transient_retry_counter = TRANSIENT_RETRY_MAX; - volatile int gil_retry_counter = GIL_RETRY_MAX; - volatile int first_retry = 1; + int status; + int transient_retry_counter = TRANSIENT_RETRY_MAX; + int gil_retry_counter = GIL_RETRY_MAX; + int first_retry = 1; transaction_retry: status = xbegin(); From noreply at buildbot.pypy.org Thu Mar 27 15:27:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 15:27:43 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: cast_ptr_to_int on non-gc objects is by itself not dangerous Message-ID: <20140327142743.2744E1D24A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70305:09f9774d8d28 Date: 2014-03-27 15:26 +0100 http://bitbucket.org/pypy/pypy/changeset/09f9774d8d28/ Log: cast_ptr_to_int on non-gc objects is by itself not dangerous diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- 
a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -666,6 +666,7 @@ OP_CAST_ADR_TO_PTR = OP_CAST_POINTER OP_CAST_OPAQUE_PTR = OP_CAST_POINTER OP_CAST_PTR_TO_ADR = OP_CAST_POINTER + OP_CAST_PTR_TO_INT = OP_CAST_POINTER def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h --- a/rpython/translator/c/src/int.h +++ b/rpython/translator/c/src/int.h @@ -224,7 +224,6 @@ #define OP_CAST_INT_TO_LONGLONGLONG(x,r) r = (__int128)(x) #define OP_CAST_CHAR_TO_INT(x,r) r = (Signed)((unsigned char)(x)) #define OP_CAST_INT_TO_CHAR(x,r) r = (char)(x) -#define OP_CAST_PTR_TO_INT(x,r) r = (Signed)(x) #define OP_CAST_CURRENT_PTR_TO_INT(x,r) r = (Signed)(x) #define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (Signed)(x) diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -12,6 +12,7 @@ 'cast_opaque_ptr', 'hint', 'stack_current', 'gc_stack_bottom', 'cast_current_ptr_to_int', # this variant of 'cast_ptr_to_int' is ok + 'cast_ptr_to_int', # only for non-gc, crashes in genc for gc 'jit_force_virtual', 'jit_force_virtualizable', 'jit_force_quasi_immutable', 'jit_marker', 'jit_is_virtual', 'jit_record_known_class', From noreply at buildbot.pypy.org Thu Mar 27 15:32:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 15:32:41 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix: ll_int2hex turned inevitable Message-ID: <20140327143241.5AF801D24A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70306:f7a704c145ed Date: 2014-03-27 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/f7a704c145ed/ Log: Fix: ll_int2hex turned inevitable diff --git a/rpython/rtyper/lltypesystem/ll_str.py b/rpython/rtyper/lltypesystem/ll_str.py --- a/rpython/rtyper/lltypesystem/ll_str.py +++ b/rpython/rtyper/lltypesystem/ll_str.py @@ -39,7 +39,7 @@ j += 1 return result -hex_chars = malloc(Array(Char), 16, immortal=True) +hex_chars = malloc(Array(Char, hints={'immutable': True}), 16, immortal=True) for i in range(16): hex_chars[i] = "%x" % i From noreply at buildbot.pypy.org Thu Mar 27 15:47:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 15:47:43 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Make some of the time.*() functions transaction-safe. The rest is not reentrant (maybe we should use ctime_r etc.?) Message-ID: <20140327144743.8F9FE1D2806@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70307:64ff5f5f4ec8 Date: 2014-03-27 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/64ff5f5f4ec8/ Log: Make some of the time.*() functions transaction-safe. The rest is not reentrant (maybe we should use ctime_r etc.?) 
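	The reentrant variants hinted at above take caller-supplied buffers instead of
	pointing into static storage; a minimal sketch of how they are used (plain C,
	not part of this changeset):

	    #include <stdio.h>
	    #include <time.h>

	    int main(void)
	    {
	        time_t now = time(NULL);
	        struct tm tm_buf;
	        char text[26];                /* ctime_r/asctime_r need at least 26 bytes */

	        localtime_r(&now, &tm_buf);   /* fills tm_buf, touches no shared static data */
	        asctime_r(&tm_buf, text);
	        fputs(text, stdout);

	        ctime_r(&now, text);          /* reentrant replacement for ctime() */
	        fputs(text, stdout);
	        return 0;
	    }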
diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -109,7 +109,6 @@ ) CLOCKS_PER_SEC = platform.ConstantInteger("CLOCKS_PER_SEC") clock_t = platform.SimpleType("clock_t", rffi.ULONG) - has_gettimeofday = platform.Has('gettimeofday') if _POSIX: calling_conv = 'c' @@ -141,7 +140,7 @@ setattr(cConfig, k, v) cConfig.tm.__name__ = "_tm" -def external(name, args, result, eci=CConfig._compilation_info_): +def external(name, args, result, eci=CConfig._compilation_info_, **kwds): if _WIN and rffi.sizeof(rffi.TIME_T) == 8: # Recent Microsoft compilers use 64bit time_t and # the corresponding functions are named differently @@ -151,7 +150,8 @@ return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv=calling_conv, - releasegil=False) + releasegil=False, + **kwds) if _POSIX: cConfig.timeval.__name__ = "_timeval" @@ -162,16 +162,13 @@ tm = cConfig.tm glob_buf = lltype.malloc(tm, flavor='raw', zero=True, immortal=True) -if cConfig.has_gettimeofday: - c_gettimeofday = external('gettimeofday', [rffi.VOIDP, rffi.VOIDP], rffi.INT) TM_P = lltype.Ptr(tm) -c_clock = external('clock', [rffi.TIME_TP], clock_t) -c_time = external('time', [rffi.TIME_TP], rffi.TIME_T) -c_ctime = external('ctime', [rffi.TIME_TP], rffi.CCHARP) -c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P) -c_mktime = external('mktime', [TM_P], rffi.TIME_T) -c_asctime = external('asctime', [TM_P], rffi.CCHARP) -c_localtime = external('localtime', [rffi.TIME_TP], TM_P) +c_time = external('time', [rffi.TIME_TP], rffi.TIME_T, transactionsafe=True) +c_ctime = external('ctime', [rffi.TIME_TP], rffi.CCHARP) # not reentrant +c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P) # not reentrant +c_mktime = external('mktime', [TM_P], rffi.TIME_T, transactionsafe=True) +c_asctime = external('asctime', [TM_P], rffi.CCHARP) # not reentrant +c_localtime = external('localtime', [rffi.TIME_TP], TM_P) # not reentrant if _POSIX: c_tzset = external('tzset', [], lltype.Void) if _WIN: @@ -195,7 +192,7 @@ c_get_tzname = external('pypy_get_tzname', [], rffi.CCHARPP, win_eci) c_strftime = external('strftime', [rffi.CCHARP, rffi.SIZE_T, rffi.CCHARP, TM_P], - rffi.SIZE_T) + rffi.SIZE_T, transactionsafe=True) def _init_accept2dyear(space): if os.environ.get("PYTHONY2K"): From noreply at buildbot.pypy.org Thu Mar 27 15:51:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 15:51:12 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Some more transaction-safety. Message-ID: <20140327145112.C01571D2806@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70308:c1aa7aff6a7e Date: 2014-03-27 15:50 +0100 http://bitbucket.org/pypy/pypy/changeset/c1aa7aff6a7e/ Log: Some more transaction-safety. diff --git a/rpython/rtyper/module/ll_time.py b/rpython/rtyper/module/ll_time.py --- a/rpython/rtyper/module/ll_time.py +++ b/rpython/rtyper/module/ll_time.py @@ -85,11 +85,13 @@ if self.GETTIMEOFDAY_NO_TZ: c_gettimeofday = self.llexternal('gettimeofday', [self.TIMEVALP], rffi.INT, - _nowrapper=True, releasegil=False) + _nowrapper=True, releasegil=False, + transactionsafe=True) else: c_gettimeofday = self.llexternal('gettimeofday', [self.TIMEVALP, rffi.VOIDP], rffi.INT, - _nowrapper=True, releasegil=False) + _nowrapper=True, releasegil=False, + transactionsafe=True) c_ftime = None # We have gettimeofday(2), so force ftime(3) OFF. 
else: c_gettimeofday = None @@ -99,12 +101,14 @@ self.configure(CConfigForFTime) c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], lltype.Void, - _nowrapper=True, releasegil=False) + _nowrapper=True, releasegil=False, + transactionsafe=True) else: c_ftime = None # to not confuse the flow space c_time = self.llexternal('time', [rffi.VOIDP], rffi.TIME_T, - _nowrapper=True, releasegil=False) + _nowrapper=True, releasegil=False, + transactionsafe=True) def time_time_llimpl(): void = lltype.nullptr(rffi.VOIDP.TO) @@ -142,10 +146,10 @@ A = lltype.FixedSizeArray(lltype.SignedLongLong, 1) QueryPerformanceCounter = self.llexternal( 'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void, - releasegil=False) + releasegil=False, transactionsafe=True) QueryPerformanceFrequency = self.llexternal( 'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT, - releasegil=False) + releasegil=False, transactionsafe=True) class State(object): pass state = State() @@ -168,7 +172,8 @@ c_getrusage = self.llexternal('getrusage', [rffi.INT, lltype.Ptr(RUSAGE)], lltype.Void, - releasegil=False) + releasegil=False, + transactionsafe=True) def time_clock_llimpl(): a = lltype.malloc(RUSAGE, flavor='raw') c_getrusage(RUSAGE_SELF, a) From noreply at buildbot.pypy.org Thu Mar 27 16:37:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 16:37:56 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Fix: must not use cast_ptr_to_int() here Message-ID: <20140327153756.BF7B41D29C5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70309:1f5cf3792b5d Date: 2014-03-27 16:37 +0100 http://bitbucket.org/pypy/pypy/changeset/1f5cf3792b5d/ Log: Fix: must not use cast_ptr_to_int() here diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1,5 +1,6 @@ import weakref from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_start, debug_stop, debug_print @@ -593,7 +594,7 @@ intval = metainterp_sd.cpu.get_int_value(deadframe, index) elif typetag == self.TY_REF: refval = metainterp_sd.cpu.get_ref_value(deadframe, index) - intval = lltype.cast_ptr_to_int(refval) + intval = llop.cast_current_ptr_to_int(lltype.Signed, refval) elif typetag == self.TY_FLOAT: floatval = metainterp_sd.cpu.get_float_value(deadframe, index) intval = longlong.gethash_fast(floatval) From noreply at buildbot.pypy.org Thu Mar 27 16:42:08 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 16:42:08 +0100 (CET) Subject: [pypy-commit] stmgc default: make htm-c7 pypy-compatible Message-ID: <20140327154208.878B81D29C4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1108:f6d1d934b8ab Date: 2014-03-27 16:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/f6d1d934b8ab/ Log: make htm-c7 pypy-compatible diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -21,7 +21,7 @@ extern __thread struct htm_transaction_info_s _htm_info; -typedef struct { /* empty */ } stm_jmpbuf_t; +typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ typedef struct object_s { uint32_t gil_flags; @@ -32,6 +32,8 @@ object_t **shadowstack_base; object_t *thread_local_obj; long last_abort__bytes_in_nursery; + char *mem_clear_on_abort; /* compat 
only -- always NULL */ + size_t mem_bytes_to_clear_on_abort; /* compat only -- always NULL */ } stm_thread_local_t; extern stm_thread_local_t *_stm_tloc; @@ -92,6 +94,9 @@ void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); +inline static void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *buf) +{ stm_start_inevitable_transaction(tl); } + inline static void stm_become_inevitable( stm_thread_local_t *tl, const char *msg) { } inline static void _stm_become_inevitable(const char *msg) { } From noreply at buildbot.pypy.org Thu Mar 27 17:54:02 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 17:54:02 +0100 (CET) Subject: [pypy-commit] stmgc default: wah, some more obvious fixes Message-ID: <20140327165402.B44DF1D29C8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1109:ef9851277c7c Date: 2014-03-27 17:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/ef9851277c7c/ Log: wah, some more obvious fixes diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -4,8 +4,9 @@ #include "htm.h" pthread_mutex_t _stm_gil = PTHREAD_MUTEX_INITIALIZER; -stm_thread_local_t *_stm_tloc; -struct stm_segment_info_s _stm_segment; +__thread stm_thread_local_t *_stm_tloc; +//struct stm_segment_info_s _stm_segment; +__thread struct stm_segment_info_s* _stm_segment; #define TRANSIENT_RETRY_MAX 5 #define GIL_RETRY_MAX 5 @@ -273,6 +274,7 @@ void stm_register_thread_local(stm_thread_local_t *tl) { objects_pointing_to_nursery = list_create(); young_weakrefs = list_create(); + _stm_segment = malloc(sizeof(struct stm_segment_info_s)); tl->thread_local_obj = NULL; tl->shadowstack_base = (object_t **)malloc(768*1024); @@ -290,6 +292,7 @@ list_free(objects_pointing_to_nursery); list_free(young_weakrefs); + free(_stm_segment); } @@ -323,9 +326,9 @@ #define NB_NURSERY_PAGES 1024 // 4MB #define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) -char *_stm_nursery_base = NULL; -char *_stm_nursery_current = NULL; -char *_stm_nursery_end = NULL; +__thread char *_stm_nursery_base = NULL; +__thread char *_stm_nursery_current = NULL; +__thread char *_stm_nursery_end = NULL; #define _stm_nursery_start ((uintptr_t)_stm_nursery_base) static bool _is_in_nursery(object_t *obj) @@ -427,7 +430,7 @@ static void throw_away_nursery(void) { if (_stm_nursery_base == NULL) { - _stm_nursery_base = malloc(NURSERY_SIZE); + _stm_nursery_base = tl_malloc(NURSERY_SIZE); assert(_stm_nursery_base); _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; _stm_nursery_current = _stm_nursery_base; @@ -435,6 +438,7 @@ memset(_stm_nursery_base, 0, _stm_nursery_current-_stm_nursery_base); _stm_nursery_current = _stm_nursery_base; + STM_SEGMENT->nursery_current = _stm_nursery_current; } #define WEAKREF_PTR(wr, sz) ((object_t * TLPREFIX *)(((char *)(wr)) + (sz) - sizeof(void*))) @@ -503,6 +507,7 @@ char *end = p + size_rounded_up; assert(end <= _stm_nursery_end); _stm_nursery_current = end; + STM_SEGMENT->nursery_current = end; return (object_t *)p; } diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -36,16 +36,17 @@ size_t mem_bytes_to_clear_on_abort; /* compat only -- always NULL */ } stm_thread_local_t; -extern stm_thread_local_t *_stm_tloc; -extern char *_stm_nursery_current, *_stm_nursery_end; +extern __thread stm_thread_local_t *_stm_tloc; +extern __thread char *_stm_nursery_current, *_stm_nursery_end; struct stm_segment_info_s { stm_jmpbuf_t *jmpbuf_ptr; /* compat only -- always NULL */ - char 
*nursery_current; /* compat only -- always NULL */ + char *nursery_current; /* updated... */ }; -extern struct stm_segment_info_s _stm_segment; -#define STM_SEGMENT (&_stm_segment) +//extern struct stm_segment_info_s _stm_segment; +extern __thread struct stm_segment_info_s *_stm_segment; +#define STM_SEGMENT (_stm_segment) #ifdef NDEBUG #define OPT_ASSERT(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) @@ -75,6 +76,7 @@ char *p = _stm_nursery_current; char *end = p + size_rounded_up; _stm_nursery_current = end; + STM_SEGMENT->nursery_current = end; if (UNLIKELY(end > _stm_nursery_end)) return _stm_allocate_slowpath(size_rounded_up); From noreply at buildbot.pypy.org Thu Mar 27 18:59:17 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 18:59:17 +0100 (CET) Subject: [pypy-commit] stmgc default: some tweaks that didn't help Message-ID: <20140327175917.9B14B1D2AC7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1110:5a0622b6bff5 Date: 2014-03-27 18:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/5a0622b6bff5/ Log: some tweaks that didn't help diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -8,8 +8,8 @@ //struct stm_segment_info_s _stm_segment; __thread struct stm_segment_info_s* _stm_segment; -#define TRANSIENT_RETRY_MAX 5 -#define GIL_RETRY_MAX 5 +#define TRANSIENT_RETRY_MAX 10 +#define GIL_RETRY_MAX 10 #define ABORT_GIL_LOCKED 1 @@ -17,7 +17,7 @@ static __thread int gil_transactions = 0; static __thread int htm_transactions = 0; -__thread struct htm_transaction_info_s _htm_info; +__thread struct htm_transaction_info_s _htm_info __attribute__((aligned(64))); #define smp_spinloop() asm volatile ("pause":::"memory") @@ -32,13 +32,12 @@ static int spin_and_acquire_gil(stm_thread_local_t *tl) { int n = 5; - while ((n --> 0) && mutex_locked(&_stm_gil)) { + while (n-- > 0) { + if (!mutex_locked(&_stm_gil)) + return 0; smp_spinloop(); } - if (!mutex_locked(&_stm_gil)) - return 0; - acquire_gil(tl); return 1; } @@ -274,10 +273,10 @@ void stm_register_thread_local(stm_thread_local_t *tl) { objects_pointing_to_nursery = list_create(); young_weakrefs = list_create(); - _stm_segment = malloc(sizeof(struct stm_segment_info_s)); + _stm_segment = tl_malloc(sizeof(struct stm_segment_info_s)); tl->thread_local_obj = NULL; - tl->shadowstack_base = (object_t **)malloc(768*1024); + tl->shadowstack_base = (object_t **)tl_malloc(768*1024); assert(tl->shadowstack_base); tl->shadowstack = tl->shadowstack_base; tl->last_abort__bytes_in_nursery = 0; @@ -288,11 +287,11 @@ "in %p\ngil_transactions: %d\nhtm_transactions: %d\nratio: %f\n", tl, gil_transactions, htm_transactions, (float)gil_transactions / (float)htm_transactions); - free(tl->shadowstack_base); + //free(tl->shadowstack_base); list_free(objects_pointing_to_nursery); list_free(young_weakrefs); - free(_stm_segment); + //free(_stm_segment); } diff --git a/htm-c7/stmgc.h b/htm-c7/stmgc.h --- a/htm-c7/stmgc.h +++ b/htm-c7/stmgc.h @@ -18,7 +18,7 @@ int retry_counter; /* only counting transient aborts of HTM */ int use_gil; /* in GIL mode? 0=HTM */ }; -extern __thread struct htm_transaction_info_s _htm_info; +extern __thread struct htm_transaction_info_s _htm_info __attribute__((aligned(64))); typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ @@ -43,6 +43,7 @@ struct stm_segment_info_s { stm_jmpbuf_t *jmpbuf_ptr; /* compat only -- always NULL */ char *nursery_current; /* updated... 
*/ + int segment_num; /* compat only -- always NULL */ }; //extern struct stm_segment_info_s _stm_segment; extern __thread struct stm_segment_info_s *_stm_segment; From noreply at buildbot.pypy.org Thu Mar 27 19:02:07 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 27 Mar 2014 19:02:07 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: HTM adaptions Message-ID: <20140327180207.3D90A1D2AD0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r70310:c62679ce9830 Date: 2014-03-27 19:01 +0100 http://bitbucket.org/pypy/pypy/changeset/c62679ce9830/ Log: HTM adaptions diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -2,7 +2,7 @@ /* This is not meant to be compiled stand-alone, but with all of PyPy's #defines and #includes prepended. */ -__thread struct stm_thread_local_s stm_thread_local; +__thread struct stm_thread_local_s stm_thread_local __attribute__((aligned(64))); /* 0 = not initialized; 1 = normal mode; 2 or more = atomic mode */ __thread long pypy_stm_ready_atomic; @@ -102,6 +102,13 @@ to a value slightly smaller than the value at last abort. */ long counter, limit; +#ifdef HTM_INFO_AVAILABLE + if (_htm_info.use_gil) + counter = 0; /* maybe we want the default size here... */ + else + counter = _htm_info.retry_counter; + limit = pypy_transaction_length >> counter; +#else counter = *v_counter; *v_counter = counter + 1; @@ -112,6 +119,8 @@ limit = stm_thread_local.last_abort__bytes_in_nursery; limit -= (limit >> 4); } +#endif + pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit; pypy_stm_ready_atomic = 1; /* reset after abort */ } From noreply at buildbot.pypy.org Thu Mar 27 19:25:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Mar 2014 19:25:26 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c7: Revert of some old and recent checkins, getting rid of cast_current_ptr_to_int again. It's not Message-ID: <20140327182526.19D8D1D29C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70311:9fd001585246 Date: 2014-03-27 19:24 +0100 http://bitbucket.org/pypy/pypy/changeset/9fd001585246/ Log: Revert of some old and recent checkins, getting rid of cast_current_ptr_to_int again. It's not different any more from the trunk's cast_ptr_to_int, which gets us the address of the GC object, which might still be moving once but which is otherwise coherent. 
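	At the C level both casts expand to reinterpreting the object's current address
	as a signed word; a small illustrative sketch of that expansion, where Signed and
	fake_gc_object are stand-ins and only the macro comes from the diff below:

	    #include <stdint.h>
	    #include <stdio.h>

	    typedef intptr_t Signed;                   /* stand-in for RPython's Signed */
	    #define OP_CAST_PTR_TO_INT(x, r)  r = (Signed)(x)

	    int main(void)
	    {
	        static int fake_gc_object;             /* stand-in for a GC-managed object */
	        Signed ident;
	        OP_CAST_PTR_TO_INT(&fake_gc_object, ident);
	        /* with a moving collector this value may change once when the object
	           moves, but it is otherwise stable */
	        printf("address-as-int: %ld\n", (long)ident);
	        return 0;
	    }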
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1,6 +1,5 @@ import weakref from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_start, debug_stop, debug_print @@ -594,7 +593,7 @@ intval = metainterp_sd.cpu.get_int_value(deadframe, index) elif typetag == self.TY_REF: refval = metainterp_sd.cpu.get_ref_value(deadframe, index) - intval = llop.cast_current_ptr_to_int(lltype.Signed, refval) + intval = lltype.cast_ptr_to_int(refval) elif typetag == self.TY_FLOAT: floatval = metainterp_sd.cpu.get_float_value(deadframe, index) intval = longlong.gethash_fast(floatval) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -572,7 +572,7 @@ hop.exception_cannot_occur() from rpython.rtyper.lltypesystem import lltype if isinstance(vobj.concretetype, lltype.Ptr): - return hop.genop('cast_current_ptr_to_int', [vobj], + return hop.genop('cast_ptr_to_int', [vobj], resulttype = lltype.Signed) from rpython.rtyper.error import TyperError raise TyperError("current_object_addr_as_int() cannot be applied to" diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -749,8 +749,6 @@ checkptr(ptr1) return lltype.cast_ptr_to_int(ptr1) - op_cast_current_ptr_to_int = op_cast_ptr_to_int - def op_cast_opaque_ptr(self, RESTYPE, obj): checkptr(obj) return lltype.cast_opaque_ptr(RESTYPE, obj) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -399,7 +399,6 @@ 'ptr_iszero': LLOp(canfold=True), 'cast_ptr_to_int': LLOp(sideeffects=False), 'cast_int_to_ptr': LLOp(sideeffects=False), - 'cast_current_ptr_to_int': LLOp(sideeffects=False), # gcptr->int, approx. 
'direct_fieldptr': LLOp(canfold=True), 'direct_arrayitems': LLOp(canfold=True), 'direct_ptradd': LLOp(canfold=True), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -666,7 +666,6 @@ OP_CAST_ADR_TO_PTR = OP_CAST_POINTER OP_CAST_OPAQUE_PTR = OP_CAST_POINTER OP_CAST_PTR_TO_ADR = OP_CAST_POINTER - OP_CAST_PTR_TO_INT = OP_CAST_POINTER def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h --- a/rpython/translator/c/src/int.h +++ b/rpython/translator/c/src/int.h @@ -224,6 +224,7 @@ #define OP_CAST_INT_TO_LONGLONGLONG(x,r) r = (__int128)(x) #define OP_CAST_CHAR_TO_INT(x,r) r = (Signed)((unsigned char)(x)) #define OP_CAST_INT_TO_CHAR(x,r) r = (char)(x) +#define OP_CAST_PTR_TO_INT(x,r) r = (Signed)(x) #define OP_CAST_CURRENT_PTR_TO_INT(x,r) r = (Signed)(x) #define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (Signed)(x) diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -10,9 +10,7 @@ 'debug_print', 'debug_assert', 'debug_start', 'debug_stop', 'have_debug_prints', 'cast_opaque_ptr', 'hint', - 'stack_current', 'gc_stack_bottom', - 'cast_current_ptr_to_int', # this variant of 'cast_ptr_to_int' is ok - 'cast_ptr_to_int', # only for non-gc, crashes in genc for gc + 'stack_current', 'gc_stack_bottom', 'cast_ptr_to_int', 'jit_force_virtual', 'jit_force_virtualizable', 'jit_force_quasi_immutable', 'jit_marker', 'jit_is_virtual', 'jit_record_known_class', From noreply at buildbot.pypy.org Fri Mar 28 08:22:00 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 28 Mar 2014 08:22:00 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: reopen branch Message-ID: <20140328072200.EE88B1D2ADB@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes4 Changeset: r70312:9ea80c1048f7 Date: 2014-03-28 09:39 +0300 http://bitbucket.org/pypy/pypy/changeset/9ea80c1048f7/ Log: reopen branch diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -232,9 +232,11 @@ try: fsencoding = test_support.TESTFN_ENCODING or "ascii" asciival = unicwd.encode(fsencoding) - v = asciival.find('?') - if v >= 0: - raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) + if fsencoding == "mbcs": + # http://bugs.python.org/issue850997 + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -211,6 +211,9 @@ .. __: `recursion depth limit`_ +We also do not include any of the recent API additions to Stackless +Python, like ``set_atomic()``. Contributions welcome. 
+ Recursion depth limit +++++++++++++++++++++ diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -700,17 +700,17 @@ return toencode = u'caf\xe9', 'caf\xe9' try: - #test for non-latin1 codepage, more general test needed + # test for non-latin1 codepage, more general test needed import _winreg - key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'System\CurrentControlSet\Control\Nls\CodePage') - if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': #non-latin1 + if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': # non-latin1 toencode = u'caf\xbf','caf\xbf' except: assert False, 'cannot test mbcs on this windows system, check code page' assert u'test'.encode('mbcs') == 'test' assert toencode[0].encode('mbcs') == toencode[1] - assert u'\u040a'.encode('mbcs') == '?' # some cyrillic letter + assert u'\u040a'.encode('mbcs') == '?' # some cyrillic letter assert 'cafx\e9'.decode('mbcs') == u'cafx\e9' def test_bad_handler_string_result(self): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,8 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # tests are not strictly ansi C compliant, compile as C++ + kwds["compile_extra"].append("/TP") # prevent linking with PythonXX.lib w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % @@ -640,30 +642,30 @@ body = """ static PyObject* foo_pi(PyObject* self, PyObject *args) { - PyObject *true = Py_True; - int refcnt = true->ob_refcnt; + PyObject *true_obj = Py_True; + int refcnt = true_obj->ob_refcnt; int refcnt_after; - Py_INCREF(true); - Py_INCREF(true); - PyBool_Check(true); - refcnt_after = true->ob_refcnt; - Py_DECREF(true); - Py_DECREF(true); + Py_INCREF(true_obj); + Py_INCREF(true_obj); + PyBool_Check(true_obj); + refcnt_after = true_obj->ob_refcnt; + Py_DECREF(true_obj); + Py_DECREF(true_obj); fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt+2 && refcnt < 3); } static PyObject* foo_bar(PyObject* self, PyObject *args) { - PyObject *true = Py_True; + PyObject *true_obj = Py_True; PyObject *tup = NULL; - int refcnt = true->ob_refcnt; + int refcnt = true_obj->ob_refcnt; int refcnt_after; tup = PyTuple_New(1); - Py_INCREF(true); - if (PyTuple_SetItem(tup, 0, true) < 0) + Py_INCREF(true_obj); + if (PyTuple_SetItem(tup, 0, true_obj) < 0) return NULL; - refcnt_after = true->ob_refcnt; + refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); fprintf(stderr, "REFCNT2 %i %i\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt); diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -107,10 +107,10 @@ } EnumObject; static void - enum_dealloc(EnumObject *op) + enum_dealloc(PyObject *op) { - Py_DECREF(op->ob_name); - Py_TYPE(op)->tp_free((PyObject *)op); + Py_DECREF(((EnumObject *)op)->ob_name); + Py_TYPE(op)->tp_free(op); } static PyMemberDef enum_members[] = { diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ 
b/pypy/module/micronumpy/boxes.py @@ -161,22 +161,25 @@ return space.index(self.item(space)) def descr_int(self, space): - if isinstance(self, W_UnsignedIntegerBox): - box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) else: - box = self.convert_to(space, W_Int64Box._get_dtype(space)) - return space.int(box.item(space)) + box = self + return space.call_function(space.w_int, box.item(space)) def descr_long(self, space): - if isinstance(self, W_UnsignedIntegerBox): - box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) else: - box = self.convert_to(space, W_Int64Box._get_dtype(space)) - return space.long(box.item(space)) + box = self + return space.call_function(space.w_long, box.item(space)) def descr_float(self, space): - box = self.convert_to(space, W_Float64Box._get_dtype(space)) - return space.float(box.item(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) + else: + box = self + return space.call_function(space.w_float, box.item(space)) def descr_oct(self, space): return space.oct(self.descr_int(space)) @@ -185,8 +188,7 @@ return space.hex(self.descr_int(space)) def descr_nonzero(self, space): - dtype = self.get_dtype(space) - return space.wrap(dtype.itemtype.bool(self)) + return space.wrap(self.get_dtype(space).itemtype.bool(self)) def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -48,6 +48,8 @@ order = 'C' else: order = space.str_w(w_order) + if order == 'K': + order = 'C' if order != 'C': # or order != 'F': raise oefmt(space.w_ValueError, "Unknown order: %s", order) @@ -100,7 +102,7 @@ @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): w_a = convert_to_array(space, w_a) - if w_dtype is None: + if space.is_none(w_dtype): dtype = w_a.get_dtype() else: dtype = space.interp_w(descriptor.W_Dtype, diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -185,7 +185,7 @@ return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), - self.get_order(), w_instance=self) + self.get_order(), w_instance=self) if not w_res.get_size(): return w_res return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, @@ -201,6 +201,8 @@ view = chunks.apply(space, self) view.implementation.setslice(space, val_arr) return + if support.product(iter_shape) == 0: + return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, prefix) @@ -1169,7 +1171,7 @@ raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) totalsize = support.product(shape) * dtype.elsize - if totalsize+offset > buf.getlength(): + if totalsize + offset > buf.getlength(): raise OperationError(space.w_TypeError, space.wrap( "buffer is too small for requested array")) storage = rffi.cast(RAW_STORAGE_PTR, raw_ptr) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -334,6 +334,15 @@ b = array(a, dtype=float) assert 
b == 123.0 + a = array([[123, 456]]) + assert a.flags['C'] + b = array(a, order='K') + assert b.flags['C'] + assert (b == a).all() + b = array(a, order='K', copy=True) + assert b.flags['C'] + assert (b == a).all() + def test_dtype_attribute(self): import numpy as np a = np.array(40000, dtype='uint16') @@ -404,6 +413,8 @@ assert b.shape == a.shape assert b.dtype == a.dtype assert b[0,0] != 1 + b = np.empty_like(np.array(True), dtype=None) + assert b.dtype is np.dtype(bool) b = np.empty_like(a, dtype='i4') assert b.shape == a.shape assert b.dtype == np.dtype('i4') @@ -2369,6 +2380,19 @@ assert b.shape == b[...].shape assert (b == b[...]).all() + def test_empty_indexing(self): + import numpy as np + r = np.ones(3) + ind = np.array([], np.int32) + tmp = np.array([], np.float64) + assert r[ind].shape == (0,) + r[ind] = 0 + assert (r == np.ones(3)).all() + r[ind] = tmp + assert (r == np.ones(3)).all() + r[[]] = 0 + assert (r == np.ones(3)).all() + class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -36,6 +36,24 @@ exc = raises(ValueError, "int(np.str_('abc'))") assert exc.value.message.startswith('invalid literal for int()') assert int(np.uint64((2<<63) - 1)) == (2<<63) - 1 + exc = raises(ValueError, "int(np.float64(np.nan))") + assert str(exc.value) == "cannot convert float NaN to integer" + exc = raises(OverflowError, "int(np.float64(np.inf))") + assert str(exc.value) == "cannot convert float infinity to integer" + assert int(np.float64(1e100)) == int(1e100) + assert long(np.float64(1e100)) == int(1e100) + assert int(np.complex128(1e100+2j)) == int(1e100) + exc = raises(OverflowError, "int(np.complex64(1e100+2j))") + assert str(exc.value) == "cannot convert float infinity to integer" + assert int(np.str_('100000000000000000000')) == 100000000000000000000 + assert long(np.str_('100000000000000000000')) == 100000000000000000000 + + assert float(np.float64(1e100)) == 1e100 + assert float(np.complex128(1e100+2j)) == 1e100 + assert float(np.str_('1e100')) == 1e100 + assert float(np.str_('inf')) == np.inf + assert str(float(np.float64(np.nan))) == 'nan' + assert oct(np.int32(11)) == '013' assert oct(np.float32(11.6)) == '013' assert oct(np.complex64(11-12j)) == '013' diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass, rstr +from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong from rpython.rlib.rtimer import read_timestamp @@ -66,7 +67,6 @@ self.args = args class CallDescr(AbstractDescr): - from rpython.rlib.clibffi import FFI_DEFAULT_ABI def __init__(self, RESULT, ARGS, extrainfo, ABI=FFI_DEFAULT_ABI): self.RESULT = RESULT self.ARGS = ARGS diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -59,9 +59,9 @@ def f(): return rposix.stat(self.path).st_mtime if sys.platform == 'win32': - #double vs. float, be satisfied with sub-millisec resolution + # double vs. 
float, be satisfied with sub-millisec resolution assert abs(interpret(f, []) - os.stat(self.ufilename).st_mtime) < 1e-4 - else: + else: assert interpret(f, []) == os.stat(self.ufilename).st_mtime def test_access(self): diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -358,7 +358,6 @@ if isinstance(T, lltype.Ptr): if isinstance(T.TO, lltype.FuncType): - functype = ctypes.CFUNCTYPE if sys.platform == 'win32': from rpython.rlib.clibffi import FFI_STDCALL, FFI_DEFAULT_ABI From noreply at buildbot.pypy.org Fri Mar 28 08:22:02 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 28 Mar 2014 08:22:02 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: extend rpoll tests Message-ID: <20140328072202.37A011D2ADB@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes4 Changeset: r70313:3995ab742fb8 Date: 2014-03-28 10:13 +0300 http://bitbucket.org/pypy/pypy/changeset/3995ab742fb8/ Log: extend rpoll tests diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -9,6 +9,16 @@ def setup_module(mod): rsocket_startup() +def one_in_event(events, fd): + assert len(events) == 1 + assert events[0][0] == fd + assert events[0][1] & POLLIN + +def one_out_event(events, fd): + assert len(events) == 1 + assert events[0][0] == fd + assert events[0][1] & POLLOUT + def test_simple(): serv = RSocket(AF_INET, SOCK_STREAM) serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) @@ -24,18 +34,14 @@ assert err != 0 events = poll({serv.fd: POLLIN}, timeout=500) - assert len(events) == 1 - assert events[0][0] == serv.fd - assert events[0][1] & POLLIN + one_in_event(events, serv.fd) servconn_fd, cliaddr = serv.accept() servconn = RSocket(AF_INET, fd=servconn_fd) events = poll({serv.fd: POLLIN, cli.fd: POLLOUT}, timeout=500) - assert len(events) == 1 - assert events[0][0] == cli.fd - assert events[0][1] & POLLOUT + one_out_event(events, cli.fd) err = cli.connect_ex(servaddr) # win32: returns WSAEISCONN when the connection finally succeed. @@ -55,6 +61,70 @@ servconn.close() serv.close() +def test_exchange(): + serv = RSocket(AF_INET, SOCK_STREAM) + serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) + serv.listen(1) + servaddr = serv.getsockname() + + events = poll({serv.fd: POLLIN}, timeout=100) + assert len(events) == 0 + + cli = RSocket(AF_INET, SOCK_STREAM) + cli.setblocking(False) + err = cli.connect_ex(servaddr) + assert err != 0 + + events = poll({serv.fd: POLLIN}, timeout=500) + one_in_event(events, serv.fd) + + servconn_fd, cliaddr = serv.accept() + servconn = RSocket(AF_INET, fd=servconn_fd) + + events = poll({serv.fd: POLLIN, + cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + + #send some data + events = poll({cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + cli.send("g'day, mate") + events = poll({servconn.fd: POLLIN}, timeout=500) + one_in_event(events, servconn.fd) + answer = servconn.recv(1024) + assert answer == "g'day, mate" + + #send a reply + events = poll({servconn.fd: POLLOUT}, timeout=500) + one_out_event(events, servconn.fd) + servconn.send("you mean hello?") + events = poll({cli.fd: POLLIN}, timeout=500) + one_in_event(events, cli.fd) + answer = cli.recv(1024) + assert answer == "you mean hello?" 
+ + #send more data + events = poll({cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + cli.send("sorry, wrong channel") + events = poll({servconn.fd: POLLIN}, timeout=500) + one_in_event(events, servconn.fd) + answer = servconn.recv(1024) + assert answer == "sorry, wrong channel" + + events = poll({servconn.fd: POLLOUT}, timeout=500) + one_out_event(events, servconn.fd) + servconn.send("np bye") + events = poll({cli.fd: POLLIN}, timeout=500) + one_in_event(events, cli.fd) + answer = cli.recv(1024) + assert answer == "np bye" + + cli.close() + servconn.close() + serv.close() + + def test_select(): if os.name == 'nt': py.test.skip('cannot select on file handles on windows') From noreply at buildbot.pypy.org Fri Mar 28 08:22:03 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 28 Mar 2014 08:22:03 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: ignore build artifact Message-ID: <20140328072203.AC2491D2ADB@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70314:2a672c888cae Date: 2014-03-28 10:16 +0300 http://bitbucket.org/pypy/pypy/changeset/2a672c888cae/ Log: ignore build artifact diff --git a/pypy/goal/python27.lib b/pypy/goal/python27.lib new file mode 100644 index 0000000000000000000000000000000000000000..a937a13c3128e34e7b099985354a183be9175bd3 GIT binary patch [cut] From noreply at buildbot.pypy.org Fri Mar 28 10:24:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Mar 2014 10:24:34 +0100 (CET) Subject: [pypy-commit] stmgc default: Mention in TODO some idea from Remi's technical report Message-ID: <20140328092434.5A0151D2575@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1111:f50166f43ba4 Date: 2014-03-28 10:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/f50166f43ba4/ Log: Mention in TODO some idea from Remi's technical report diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -1,6 +1,3 @@ - -known-working revision: 75893b92af4e - - use small uniform gcpages @@ -12,3 +9,5 @@ minor collections - fork() is done by copying the whole mmap non-lazily; improve. 
+ +- contention.c: when pausing: should also tell other_pseg "please commit soon" diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -149,6 +149,8 @@ */ contmgr.other_pseg->signal_when_done = true; + /* XXX should also tell other_pseg "please commit soon" */ + dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; From noreply at buildbot.pypy.org Fri Mar 28 10:59:03 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 28 Mar 2014 10:59:03 +0100 (CET) Subject: [pypy-commit] stmgc default: add a simple demo Message-ID: <20140328095903.183361D29B6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1112:72914bbde634 Date: 2014-03-28 10:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/72914bbde634/ Log: add a simple demo diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_simple.c @@ -0,0 +1,108 @@ +#include +#include +#include +#include +#include + +#ifdef USE_HTM +# include "../../htm-c7/stmgc.h" +#else +# include "stmgc.h" +#endif + +#define ITERS 10000000 +#define NTHREADS 2 + + +typedef TLPREFIX struct node_s node_t; +typedef node_t* nodeptr_t; +typedef object_t* objptr_t; + +struct node_s { + struct object_s hdr; + long value; + nodeptr_t next; +}; + +__thread stm_thread_local_t stm_thread_local; + + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + return sizeof(struct node_s); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + visit((object_t **)&n->next); +} + + + +static sem_t done; + +static __thread int tl_counter = 0; +static int gl_counter = 0; + +void *demo2(void *arg) +{ + int status; + stm_register_thread_local(&stm_thread_local); + tl_counter = 0; + + int i = 0; + while (i < ITERS) { + stm_start_inevitable_transaction(&stm_thread_local); + tl_counter++; + if (i % 5 == 0) + gl_counter++; + stm_commit_transaction(); + i++; + } + + assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + + stm_unregister_thread_local(&stm_thread_local); + status = sem_post(&done); assert(status == 0); + return NULL; +} + + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + + + +int main(void) +{ + int status, i; + + status = sem_init(&done, 0, 0); assert(status == 0); + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + + + for (i = 1; i <= NTHREADS; i++) { + newthread(demo2, (void*)(uintptr_t)i); + } + + for (i = 1; i <= NTHREADS; i++) { + status = sem_wait(&done); assert(status == 0); + } + + + stm_unregister_thread_local(&stm_thread_local); + stm_teardown(); + + return 0; +} diff --git a/duhton/demo/micro_transactions.duh b/duhton/demo/micro_transactions.duh --- a/duhton/demo/micro_transactions.duh +++ b/duhton/demo/micro_transactions.duh @@ -5,16 +5,23 @@ (defun increment () - ;;(set c (+ (get c) 1)) - (setq c 0) - (setq c (+ c 1)) ) +(defun big_transactions () + (setq n 0) + (while (< n 20000) + (transaction increment) + (setq n (+ n 1)) + ) + ) + (setq n 0) -(while (< n 10000000) - (transaction increment) +(while (< n 800) + (transaction big_transactions) (setq n (+ n 1)) ) +(setq timer (time)) (run-transactions) +(print (quote TIME_IN_PARALLEL:) (- (time) timer)) diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- 
a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -2,6 +2,7 @@ #include #include #include "htm.h" +#include pthread_mutex_t _stm_gil = PTHREAD_MUTEX_INITIALIZER; __thread stm_thread_local_t *_stm_tloc; @@ -117,15 +118,17 @@ void stm_commit_transaction(void) { stm_collect(0); _stm_tloc = NULL; - if (mutex_locked(&_stm_gil)) { - assert(!xtest()); + if (_htm_info.use_gil) { + OPT_ASSERT(!xtest()); if (pthread_mutex_unlock(&_stm_gil) != 0) abort(); gil_transactions++; - //fprintf(stderr, "G"); + if (gil_transactions % 512 == 0) + fprintf(stderr, "G"); } else { xend(); htm_transactions++; - //fprintf(stderr, "H"); + if (htm_transactions % 512 == 0) + fprintf(stderr, "H"); } } @@ -201,7 +204,7 @@ static inline uintptr_t list_pop_item(struct list_s *lst) { - assert(lst->count > 0); + OPT_ASSERT(lst->count > 0); return lst->items[--lst->count]; } @@ -277,7 +280,7 @@ tl->thread_local_obj = NULL; tl->shadowstack_base = (object_t **)tl_malloc(768*1024); - assert(tl->shadowstack_base); + OPT_ASSERT(tl->shadowstack_base); tl->shadowstack = tl->shadowstack_base; tl->last_abort__bytes_in_nursery = 0; } @@ -304,7 +307,7 @@ object_t *_stm_allocate_old(ssize_t size) { char *p = tl_malloc(size); - assert(p); + OPT_ASSERT(p); memset(p, 0, size); ((object_t *)p)->gil_flags = _STM_GCFLAG_WRITE_BARRIER; return (object_t *)p; @@ -313,7 +316,7 @@ object_t *_stm_allocate_external(ssize_t size) { char *p = tl_malloc(size); - assert(p); + OPT_ASSERT(p); memset(p, 0, size); _stm_write_slowpath((object_t *)p); return (object_t *)p; @@ -367,7 +370,7 @@ size_t size = stmcb_size_rounded_up(obj); nobj = tl_malloc(size); - assert(nobj); + OPT_ASSERT(nobj); /* Copy the object */ memcpy(nobj, obj, size); @@ -400,10 +403,10 @@ static inline void _collect_now(object_t *obj) { - assert(!_is_in_nursery(obj)); + OPT_ASSERT(!_is_in_nursery(obj)); /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. 
*/ - assert(!(obj->gil_flags & GCFLAG_WRITE_BARRIER)); + OPT_ASSERT(!(obj->gil_flags & GCFLAG_WRITE_BARRIER)); obj->gil_flags |= GCFLAG_WRITE_BARRIER; /* Trace the 'obj' to replace pointers to nursery with pointers @@ -430,7 +433,7 @@ { if (_stm_nursery_base == NULL) { _stm_nursery_base = tl_malloc(NURSERY_SIZE); - assert(_stm_nursery_base); + OPT_ASSERT(_stm_nursery_base); _stm_nursery_end = _stm_nursery_base + NURSERY_SIZE; _stm_nursery_current = _stm_nursery_base; } @@ -448,7 +451,7 @@ young_weakrefs, object_t * /*item*/, ({ - assert(_is_in_nursery(item)); + OPT_ASSERT(_is_in_nursery(item)); object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)item; /* the following checks are done like in nursery.c: */ @@ -459,11 +462,11 @@ item = pforwarded_array[1]; /* moved location */ - assert(!_is_in_nursery(item)); + OPT_ASSERT(!_is_in_nursery(item)); ssize_t size = 16; object_t *pointing_to = *WEAKREF_PTR(item, size); - assert(pointing_to != NULL); + OPT_ASSERT(pointing_to != NULL); if (_is_in_nursery(pointing_to)) { object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)pointing_to; @@ -504,7 +507,7 @@ char *p = _stm_nursery_current; char *end = p + size_rounded_up; - assert(end <= _stm_nursery_end); + OPT_ASSERT(end <= _stm_nursery_end); _stm_nursery_current = end; STM_SEGMENT->nursery_current = end; return (object_t *)p; @@ -512,7 +515,7 @@ object_t *stm_allocate_weakref(ssize_t size_rounded_up) { - assert(size_rounded_up == 16); + OPT_ASSERT(size_rounded_up == 16); object_t *obj = stm_allocate(size_rounded_up); LIST_APPEND(young_weakrefs, obj); return obj; From noreply at buildbot.pypy.org Fri Mar 28 13:17:07 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 28 Mar 2014 13:17:07 +0100 (CET) Subject: [pypy-commit] stmgc default: more statistics, try harder to avoid GIL Message-ID: <20140328121707.A0D591D29D0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1113:9afe044b265f Date: 2014-03-28 13:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/9afe044b265f/ Log: more statistics, try harder to avoid GIL diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -33,6 +33,6 @@ release-htm-%: %.c ../../htm-c7/stmgc.? ../../htm-c7/htm.h - clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-htm-$* ../../htm-c7/stmgc.c -Wall -DUSE_HTM + clang -I.. -pthread -g -O2 $< -o release-htm-$* ../../htm-c7/stmgc.c -Wall -DUSE_HTM diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -10,7 +10,7 @@ # include "stmgc.h" #endif -#define ITERS 10000000 +#define ITERS 1000000 #define NTHREADS 2 @@ -52,12 +52,15 @@ stm_register_thread_local(&stm_thread_local); tl_counter = 0; + object_t *tmp; int i = 0; while (i < ITERS) { stm_start_inevitable_transaction(&stm_thread_local); tl_counter++; - if (i % 5 == 0) - gl_counter++; + if (i % 500 < 250) + STM_PUSH_ROOT(stm_thread_local, stm_allocate(16));//gl_counter++; + else + STM_POP_ROOT(stm_thread_local, tmp); stm_commit_transaction(); i++; } diff --git a/htm-c7/htm.h b/htm-c7/htm.h --- a/htm-c7/htm.h +++ b/htm-c7/htm.h @@ -67,6 +67,7 @@ static __attribute__((__always_inline__)) inline int mutex_locked(pthread_mutex_t* mut) { + /* HACK: pthread internals! 
*/ return !!mut->__data.__lock; } diff --git a/htm-c7/stmgc.c b/htm-c7/stmgc.c --- a/htm-c7/stmgc.c +++ b/htm-c7/stmgc.c @@ -9,7 +9,7 @@ //struct stm_segment_info_s _stm_segment; __thread struct stm_segment_info_s* _stm_segment; -#define TRANSIENT_RETRY_MAX 10 +#define TRANSIENT_RETRY_MAX 100 #define GIL_RETRY_MAX 10 #define ABORT_GIL_LOCKED 1 @@ -17,6 +17,10 @@ static __thread int gil_transactions = 0; static __thread int htm_transactions = 0; +static __thread int gil_retry_acquire = 0; +static __thread int gil_spin_retry_acquire = 0; +static __thread int transient_retry_acquire = 0; +static __thread int persistent_acquire = 0; __thread struct htm_transaction_info_s _htm_info __attribute__((aligned(64))); @@ -32,13 +36,14 @@ } static int spin_and_acquire_gil(stm_thread_local_t *tl) { - int n = 5; + int n = 500; while (n-- > 0) { if (!mutex_locked(&_stm_gil)) return 0; smp_spinloop(); } + gil_spin_retry_acquire++; acquire_gil(tl); return 1; } @@ -61,10 +66,10 @@ _htm_info.retry_counter = 0; _htm_info.use_gil = 0; - if (mutex_locked(&_stm_gil)) { - if (spin_and_acquire_gil(tl)) - return; - } + /* if (mutex_locked(&_stm_gil)) { */ + /* if (spin_and_acquire_gil(tl)) */ + /* return; */ + /* } */ int status; int transient_retry_counter = TRANSIENT_RETRY_MAX; @@ -84,7 +89,7 @@ /* adjust_transaction_length(pc) */ } - if (mutex_locked(&_stm_gil)) { + if ((status & XBEGIN_XABORT) && XBEGIN_XABORT_ARG(status) == ABORT_GIL_LOCKED) { gil_retry_counter--; if (gil_retry_counter > 0) { if (spin_and_acquire_gil(tl)) { @@ -94,8 +99,10 @@ goto transaction_retry; } } + gil_retry_acquire++; acquire_gil(tl); } else if (is_persistent(status)) { + persistent_acquire++; acquire_gil(tl); } else { /* transient abort */ @@ -105,6 +112,7 @@ smp_spinloop(); goto transaction_retry; } + transient_retry_acquire++; acquire_gil(tl); } @@ -287,9 +295,14 @@ void stm_unregister_thread_local(stm_thread_local_t *tl) { fprintf(stderr, - "in %p\ngil_transactions: %d\nhtm_transactions: %d\nratio: %f\n", + "== in %p ==\ngil_transactions:\t%d\nhtm_transactions:\t%d\nratio:\t\t\t=%f\n" + "gil_spin_retry_acquire:\t%d\n" + "gil_retry_acquire:\t%d\npersistent_acquire:\t%d\n" + "transient_retry_acquire:%d\n", tl, gil_transactions, htm_transactions, - (float)gil_transactions / (float)htm_transactions); + (float)gil_transactions / (float)htm_transactions, + gil_spin_retry_acquire, + gil_retry_acquire, persistent_acquire, transient_retry_acquire); //free(tl->shadowstack_base); list_free(objects_pointing_to_nursery); From noreply at buildbot.pypy.org Fri Mar 28 14:02:22 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 14:02:22 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Added some assert-isinstances to help RPython compile. Fixed some compile-errors. Message-ID: <20140328130222.58ECD1D29A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r709:1e6f258b73d4 Date: 2014-03-27 18:57 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1e6f258b73d4/ Log: Added some assert-isinstances to help RPython compile. Fixed some compile-errors. Made some more fields immutable. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -582,8 +582,9 @@ new_shadow.attach_shadow() def store_with_new_storage(self, new_storage, n0, w_val): - self.switch_shadow(new_storage(self.space(), self, self.size())) - self.store(self.space(), n0, w_val) + space = self.space() + self.switch_shadow(new_storage(space, self, self.size())) + self.store(space, n0, w_val) def space(self): assert self.shadow, "Cannot access space without a shadow!" diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -17,7 +17,6 @@ # Circumvent the constructor because nil is already referenced there. w_nil = instantiate(model.W_PointersObject) w_nil.w_class = None - w_nil.space = self self.add_bootstrap_object("w_nil", w_nil) self.make_bootstrap_classes() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -2,7 +2,7 @@ from spyvm import model, constants, error, wrapper, version from spyvm.version import elidable_for_version, constant_for_version from rpython.tool.pairtype import extendabletype -from rpython.rlib import rarithmetic, jit, longlong2float +from rpython.rlib import rarithmetic, objectmodel, jit, longlong2float from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rstruct.runpack import runpack @@ -19,6 +19,7 @@ def __init__(self, space, w_self): self.space = space + assert isinstance(w_self, model.W_AbstractPointersObject) self._w_self = w_self def w_self(self): return self._w_self @@ -49,6 +50,7 @@ self.copy_field_from(i, other_shadow) class AbstractStorageShadow(AbstractShadow): + _attrs_ = [] repr_classname = "AbstractStorageShadow" def store(self, n0, w_val): if self.can_contain(w_val): @@ -58,9 +60,9 @@ def can_contain(self, w_val): return self.static_can_contain(self.space, w_val) def do_store(self, n0, w_val): - raise NotImplemtedError() + raise NotImplementedError() def generelized_strategy_for(self, w_val): - raise NotImplemtedError() + raise NotImplementedError() class AllNilStorageShadow(AbstractStorageShadow): repr_classname = "AllNilStorageShadow" @@ -85,8 +87,8 @@ class AbstractValueOrNilStorageMixin(object): # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class - storage = [] _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] def __init__(self, space, w_self, size): AbstractStorageShadow.__init__(self, space, w_self) @@ -106,13 +108,13 @@ return self.wrap(self.space, val) def do_store(self, n0, w_val): - store = self.storage if w_val == self.space.w_nil: - store[n0] = self.nil_value + self.storage[n0] = self.nil_value else: - store[n0] = self.unwrap(self.space, w_val) + self.storage[n0] = self.unwrap(self.space, w_val) # This is to avoid code duplication + at objectmodel.specialize.arg(0) def _value_or_nil_can_handle(cls, space, w_val): return w_val == space.w_nil or \ (isinstance(w_val, cls.wrapper_class) \ @@ -189,6 +191,7 @@ class ListStorageShadow(AbstractShadow): _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] repr_classname = "ListStorageShadow" def __init__(self, space, w_self, size): @@ -210,6 +213,7 @@ class WeakListStorageShadow(AbstractShadow): _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] repr_classname = "WeakListStorageShadow" def __init__(self, space, w_self, size): @@ -279,6 +283,7 @@ self._s_methoddict.s_class = self elif n0 == constants.CLASS_FORMAT_INDEX: # read and painfully decode the format + assert isinstance(w_val, 
model.W_SmallInteger) classformat = self.space.unwrap_int(w_val) # The classformat in Squeak, as an integer value, is: # <2 bits=instSize//64><5 bits=cClass><4 bits=instSpec> @@ -1171,6 +1176,7 @@ repr_classname = "CompiledMethodShadow" def __init__(self, w_compiledmethod, space): + assert isinstance(w_compiledmethod, model.W_CompiledMethod) self._w_self = w_compiledmethod self.space = space self.update() From noreply at buildbot.pypy.org Fri Mar 28 14:02:23 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 14:02:23 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: _w_self is not immutable after all, due to become: and object-less context-shadows :( Message-ID: <20140328130223.84DA11D29A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r710:5f221f698a0f Date: 2014-03-27 19:38 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/5f221f698a0f/ Log: _w_self is not immutable after all, due to become: and object-less context-shadows :( diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -13,13 +13,13 @@ can be attached at run-time to any Smalltalk object. """ _attrs_ = ['_w_self', 'space'] - _immutable_fields_ = ['_w_self', 'space'] + _immutable_fields_ = ['space'] provides_getname = False repr_classname = "AbstractShadow" def __init__(self, space, w_self): self.space = space - assert isinstance(w_self, model.W_AbstractPointersObject) + assert w_self is None or isinstance(w_self, model.W_AbstractPointersObject) self._w_self = w_self def w_self(self): return self._w_self @@ -870,9 +870,11 @@ if self._w_self is not None: return self._w_self else: - size = self.size() - self.space.w_MethodContext.as_class_get_shadow(self.space).instsize() + s_MethodContext = self.space.w_MethodContext.as_class_get_shadow(self.space) + size = self.size() - s_MethodContext.instsize() space = self.space - w_self = space.w_MethodContext.as_class_get_shadow(space).new(size) + w_self = s_MethodContext.new(size) + assert isinstance(w_self, model.W_PointersObject) w_self.store_shadow(self) self._w_self = w_self self._w_self_size = w_self.size() From noreply at buildbot.pypy.org Fri Mar 28 14:02:24 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 14:02:24 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Replaced some asserts with PrimitiveFailedErrors Message-ID: <20140328130224.AE5E51D29A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r711:04f97fb06cbf Date: 2014-03-27 20:48 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/04f97fb06cbf/ Log: Replaced some asserts with PrimitiveFailedErrors diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -21,6 +21,10 @@ # n0 cannot be negative return n0 +def assert_pointers(w_obj): + if not isinstance(w_obj, model.W_PointersObject): + raise PrimitiveFailedError + # ___________________________________________________________________________ # Primitive table: it is filled in at initialization time with the # primitive functions. 
Each primitive function takes two @@ -462,7 +466,7 @@ @expose_primitive(NEW, unwrap_spec=[object]) def func(interp, s_frame, w_cls): - assert isinstance(w_cls, model.W_PointersObject) + assert_pointers(w_cls) s_class = w_cls.as_class_get_shadow(interp.space) if s_class.isvariable(): raise PrimitiveFailedError() @@ -470,7 +474,7 @@ @expose_primitive(NEW_WITH_ARG, unwrap_spec=[object, int]) def func(interp, s_frame, w_cls, size): - assert isinstance(w_cls, model.W_PointersObject) + assert_pointers(w_cls) s_class = w_cls.as_class_get_shadow(interp.space) if not s_class.isvariable() and size != 0: raise PrimitiveFailedError() @@ -488,9 +492,7 @@ "Fetches a fixed field from the object, and fails otherwise" s_class = w_rcvr.class_shadow(interp.space) assert_bounds(n0, 0, s_class.instsize()) - # only pointers have non-0 size - # XXX Now MethodContext is still own format, leave - #assert isinstance(w_rcvr, model.W_PointersObject) + assert_pointers(w_rcvr) return w_rcvr.fetch(interp.space, n0) @expose_primitive(INST_VAR_AT_PUT, unwrap_spec=[object, index1_0, object]) @@ -498,8 +500,7 @@ "Stores a value into a fixed field from the object, and fails otherwise" s_class = w_rcvr.class_shadow(interp.space) assert_bounds(n0, 0, s_class.instsize()) - # XXX Now MethodContext is still own format, leave - #assert isinstance(w_rcvr, model.W_PointersObject) + assert_pointers(w_rcvr) w_rcvr.store(interp.space, n0, w_value) return w_value @@ -512,8 +513,7 @@ @expose_primitive(STORE_STACKP, unwrap_spec=[object, int]) def func(interp, s_frame, w_frame, stackp): assert stackp >= 0 - if not isinstance(w_frame, model.W_PointersObject): - raise PrimitiveFailedError + assert_pointers(w_frame) w_frame.store(interp.space, constants.CTXPART_STACKP_INDEX, interp.space.wrap_int(stackp)) return w_frame @@ -909,7 +909,7 @@ s_cm = w_rcvr.as_compiledmethod_get_shadow(interp.space) w_class = s_cm.w_compiledin if w_class: - assert isinstance(w_class, model.W_PointersObject) + assert_pointers(w_class) w_class.as_class_get_shadow(interp.space).flush_method_caches() return w_rcvr @@ -1282,7 +1282,7 @@ # the new BlockContext's home context. Otherwise, the home # context of the receiver is used for the new BlockContext. 
# Note that in our impl, MethodContext.w_home == self - assert isinstance(w_context, model.W_PointersObject) + assert_pointers(w_context) w_method_context = w_context.as_context_get_shadow(interp.space).w_home() # The block bytecodes are stored inline: so we skip past the @@ -1318,7 +1318,7 @@ interp.space.w_BlockContext): raise PrimitiveFailedError() - assert isinstance(w_block_ctx, model.W_PointersObject) + assert_pointers(w_block_ctx) s_block_ctx = w_block_ctx.as_blockcontext_get_shadow(interp.space) @@ -1341,7 +1341,7 @@ result_is_new_frame=True) def func(interp, s_frame, w_block_ctx, args_w): - assert isinstance(w_block_ctx, model.W_PointersObject) + assert_pointers(w_block_ctx) s_block_ctx = w_block_ctx.as_blockcontext_get_shadow(interp.space) exp_arg_cnt = s_block_ctx.expected_argument_count() @@ -1434,8 +1434,7 @@ @expose_primitive(FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - if not isinstance(w_rcvr, model.W_PointersObject): - raise PrimitiveFailedError() + assert_pointers(w_rcvr) s_class = w_rcvr.as_class_get_shadow(interp.space) s_class.flush_method_caches() return w_rcvr From noreply at buildbot.pypy.org Fri Mar 28 14:02:25 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 14:02:25 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Made the different *size() methods in model module more consistent. Removed space parameter. Added a constant. Message-ID: <20140328130225.D75851D29A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r712:ab97a4252ed3 Date: 2014-03-27 20:52 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/ab97a4252ed3/ Log: Made the different *size() methods in model module more consistent. Removed space parameter. Added a constant. diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -59,6 +59,7 @@ LITERAL_START = 1 # index of the first literal after the method header BYTES_PER_WORD = 4 +WORDS_IN_FLOAT = 2 # Fixed number of word-slots in a Squeak Float object # ___________________________________________________________________________ # Special objects indices diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -192,13 +192,7 @@ @expose_on_virtual_machine_proxy([oop], int) def byteSizeOf(w_object): - s_class = w_object.class_shadow(IProxy.space) - size = s_class.instsize() - if s_class.isvariable(): - size += w_object.varsize(IProxy.space) - if not isinstance(w_object, model.W_BytesObject): - size *= 4 - return size + return w_object.bytesize(IProxy.space) @expose_on_virtual_machine_proxy([int, oop], list) def fetchArrayofObject(fieldIndex, w_object): @@ -308,7 +302,7 @@ @expose_on_virtual_machine_proxy([oop], int) def stSizeOf(w_object): - return w_object.varsize(IProxy.space) + return w_object.varsize() @expose_on_virtual_machine_proxy([int, oop, int], oop) def storeIntegerofObjectwithValue(n0, w_object, a): @@ -727,14 +721,13 @@ # #if VM_PROXY_MINOR > 5 @expose_on_virtual_machine_proxy([oop], bool, minor=5) def isArray(w_object): + # TODO - are ByteObjects and WordObjects not considered Arrays? + # What are the exact semantics of this? Should only the class Array return true? 
if not isinstance(w_object, model.W_PointersObject): return False space = IProxy.space s_class = w_object.class_shadow(space) - if s_class.instsize() == 0 and s_class.isvariable(): - return True - else: - return False + return s_class.instsize() == 0 and s_class.isvariable() @expose_on_virtual_machine_proxy([], int, minor=5) def forceInterruptCheck(): @@ -1156,7 +1149,7 @@ # # Class extensions for Array conversion # class __extend__(model.W_PointersObject): # def as_c_array(self, proxy): -# return map(lambda x: proxy.object_to_oop(x), self.vars[self.instsize(space):]) +# return map(lambda x: proxy.object_to_oop(x), self.vars[self.instsize():]) # class __extend__(model.W_BytesObject): # def as_c_array(self, proxy): diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -32,26 +32,35 @@ _attrs_ = [] # no RPython-level instance variables allowed in W_Object _settled_ = True repr_classname = "W_Object" + bytes_per_slot = constants.BYTES_PER_WORD def size(self): + """Return the number of "slots" or "items" in the receiver object. + This means different things for different objects. + For ByteObject, this means the number of bytes, for WordObject the number of words, + for PointerObject the number of pointers (regardless if it's varsized or not). + """ + return 0 + + def instsize(self): + """Return the number of slots of the object reserved for instance variables (not number of bytes). + Only returns something non-zero for W_PointersObjects, + because all other classes in this model hierarchy represent varsized classes (except for SmallInteger).""" + return 0 + + def varsize(self): + """Return number of slots in the of variable-sized part (not number of bytes). + Not necessarily number of bytes. + Variable sized objects are those created with #new:.""" + return self.size() - self.instsize() + + def bytesize(self): """Return bytesize that conforms to Blue Book. The reported size may differ from the actual size in Spy's object space, as memory representation varies depending on PyPy translation.""" - return 0 - - def instsize(self, space): - """Return the size of the object reserved for instance variables. - Only returns something non-zero for W_PointersObjects, W_Floats, and - W_LargePositiveInteger1Words""" - return 0 - - def varsize(self, space): - """Return bytesize of variable-sized part. 
- - Variable sized objects are those created with #new:.""" - return self.size() - + return self.size() * self.bytes_per_slot + def getclass(self, space): """Return Squeak class.""" raise NotImplementedError() @@ -279,6 +288,7 @@ """Large positive integer for exactly 1 word""" _attrs_ = ["value", "_exposed_size"] repr_classname = "W_LargePositiveInteger1Word" + bytes_per_slot = 1 def __init__(self, value, size=4): self.value = intmask(value) @@ -348,10 +358,10 @@ new_value = self.value & r_uint(~(0xff << skew)) new_value |= r_uint(byte << skew) self.value = intmask(new_value) - + def size(self): return self._exposed_size - + def invariant(self): return isinstance(self.value, int) @@ -450,7 +460,7 @@ self.value = float_unpack(r, 8) def size(self): - return 2 + return constants.WORDS_IN_FLOAT @signature.finishsigs class W_AbstractObjectWithClassReference(W_AbstractObjectWithIdentityHash): @@ -603,7 +613,7 @@ shadow_info = self.shadow.__repr__() if self.shadow.provides_getname: name = self._get_shadow().getname() - return '(%s) len=%d %s' % (shadow_info, self.size(), name) + return '(%s) len=%d [%s]' % (shadow_info, self.size(), name) def fetch_all(self, space): return [self.fetch(space, i) for i in range(self.size())] @@ -623,11 +633,11 @@ def at0(self, space, index0): # To test, at0 = in varsize part - return self.fetch(space, index0+self.instsize(space)) + return self.fetch(space, index0 + self.instsize()) def atput0(self, space, index0, w_value): # To test, at0 = in varsize part - self.store(space, index0 + self.instsize(space), w_value) + self.store(space, index0 + self.instsize(), w_value) def fetch(self, space, n0): return self._get_shadow().fetch(n0) @@ -635,16 +645,11 @@ def store(self, space, n0, w_value): return self._get_shadow().store(n0, w_value) - def varsize(self, space): - return self.size() - self.instsize(space) - - def instsize(self, space): - return self.class_shadow(space).instsize() - def size(self): - if not self.shadow: - return 0 return self._get_shadow().size() + + def instsize(self): + return self.class_shadow(self.space()).instsize() def store_shadow(self, shadow): self.shadow = shadow @@ -746,6 +751,7 @@ _attrs_ = ['bytes', 'c_bytes', '_size'] _immutable_fields_ = ['_size', 'bytes[*]?'] repr_classname = 'W_BytesObject' + bytes_per_slot = 1 def __init__(self, space, w_class, size): W_AbstractObjectWithClassReference.__init__(self, space, w_class) @@ -1032,7 +1038,7 @@ def size(self): return self._realsize - + def invariant(self): return False @@ -1245,9 +1251,14 @@ hasattr(self, 'primitive') and self.primitive is not None) - def size(self): + def bytesize(self, space): + # This is very special: words and bytes are mixed here. return self.headersize() + self.getliteralsize() + len(self.bytes) + def size(self): + # One word for the header. 
+ return 1 + self.literalsize + len(self.bytes) + def gettempsize(self): return self.tempsize diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -15,7 +15,7 @@ raise PrimitiveFailedError() def assert_valid_index(space, n0, w_obj): - if not 0 <= n0 < w_obj.varsize(space): + if not 0 <= n0 < w_obj.varsize(): raise PrimitiveFailedError() # return the index, since from here on the annotator knows that # n0 cannot be negative @@ -406,7 +406,7 @@ def func(interp, s_frame, w_obj): if not w_obj.class_shadow(interp.space).isvariable(): raise PrimitiveFailedError() - return interp.space.wrap_int(w_obj.varsize(interp.space)) + return interp.space.wrap_int(w_obj.varsize()) @expose_primitive(STRING_AT, unwrap_spec=[object, index1_0]) def func(interp, s_frame, w_obj, n0): @@ -765,8 +765,8 @@ # might be different (e.g. Symbol and ByteString) if w_rcvr.__class__ is not w_replacement.__class__: raise PrimitiveFailedError - if (w_rcvr.size() - w_rcvr.instsize(interp.space) <= stop - or w_replacement.size() - w_replacement.instsize(interp.space) <= repStart + (stop - start)): + if (w_rcvr.size() - w_rcvr.instsize() <= stop + or w_replacement.size() - w_replacement.instsize() <= repStart + (stop - start)): raise PrimitiveFailedError() repOff = repStart - start for i0 in range(start, stop + 1): From noreply at buildbot.pypy.org Fri Mar 28 14:02:26 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 14:02:26 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: grammar. Message-ID: <20140328130226.ED3FD1D29A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r713:d8c4663046e4 Date: 2014-03-27 20:54 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d8c4663046e4/ Log: grammar. diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -55,13 +55,13 @@ def store(self, n0, w_val): if self.can_contain(w_val): return self.do_store(n0, w_val) - new_storage = self.generelized_strategy_for(w_val) + new_storage = self.generalized_strategy_for(w_val) return self._w_self.store_with_new_storage(new_storage, n0, w_val) def can_contain(self, w_val): return self.static_can_contain(self.space, w_val) def do_store(self, n0, w_val): raise NotImplementedError() - def generelized_strategy_for(self, w_val): + def generalized_strategy_for(self, w_val): raise NotImplementedError() class AllNilStorageShadow(AbstractStorageShadow): @@ -79,7 +79,7 @@ pass def size(self): return self._size - def generelized_strategy_for(self, w_val): + def generalized_strategy_for(self, w_val): return find_storage_for_objects(self.space, [w_val]) @staticmethod def static_can_contain(space, w_val): @@ -97,7 +97,7 @@ def size(self): return len(self.storage) - def generelized_strategy_for(self, w_val): + def generalized_strategy_for(self, w_val): return ListStorageShadow def fetch(self, n0): From noreply at buildbot.pypy.org Fri Mar 28 14:02:28 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 14:02:28 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Fixed some rpython errors. Message-ID: <20140328130228.29A961D29A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r714:31857c8f49b5 Date: 2014-03-28 11:23 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/31857c8f49b5/ Log: Fixed some rpython errors. 
diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -192,7 +192,7 @@ @expose_on_virtual_machine_proxy([oop], int) def byteSizeOf(w_object): - return w_object.bytesize(IProxy.space) + return w_object.bytesize() @expose_on_virtual_machine_proxy([int, oop], list) def fetchArrayofObject(fieldIndex, w_object): diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -646,6 +646,10 @@ return self._get_shadow().store(n0, w_value) def size(self): + if not self.has_shadow(): + # TODO - this happens only for objects bootstrapped in ObjSpace. + # Think of a way to avoid this check. Usually, self.shadow is never None. + return 0 return self._get_shadow().size() def instsize(self): @@ -1142,6 +1146,7 @@ """ repr_classname = "W_CompiledMethod" + bytes_per_slot = 1 _immutable_fields_ = ["_shadow?"] _attrs_ = ["bytes", "_likely_methodname", "header", "argsize", "primitive", "literals", "tempsize", "literalsize", "islarge", "_shadow"] @@ -1251,13 +1256,8 @@ hasattr(self, 'primitive') and self.primitive is not None) - def bytesize(self, space): - # This is very special: words and bytes are mixed here. + def size(self): return self.headersize() + self.getliteralsize() + len(self.bytes) - - def size(self): - # One word for the header. - return 1 + self.literalsize + len(self.bytes) def gettempsize(self): return self.tempsize diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -24,6 +24,7 @@ def assert_pointers(w_obj): if not isinstance(w_obj, model.W_PointersObject): raise PrimitiveFailedError + return w_obj # ___________________________________________________________________________ # Primitive table: it is filled in at initialization time with the @@ -466,7 +467,7 @@ @expose_primitive(NEW, unwrap_spec=[object]) def func(interp, s_frame, w_cls): - assert_pointers(w_cls) + w_cls = assert_pointers(w_cls) s_class = w_cls.as_class_get_shadow(interp.space) if s_class.isvariable(): raise PrimitiveFailedError() @@ -474,7 +475,7 @@ @expose_primitive(NEW_WITH_ARG, unwrap_spec=[object, int]) def func(interp, s_frame, w_cls, size): - assert_pointers(w_cls) + w_cls = assert_pointers(w_cls) s_class = w_cls.as_class_get_shadow(interp.space) if not s_class.isvariable() and size != 0: raise PrimitiveFailedError() @@ -492,7 +493,7 @@ "Fetches a fixed field from the object, and fails otherwise" s_class = w_rcvr.class_shadow(interp.space) assert_bounds(n0, 0, s_class.instsize()) - assert_pointers(w_rcvr) + w_cls = assert_pointers(w_rcvr) return w_rcvr.fetch(interp.space, n0) @expose_primitive(INST_VAR_AT_PUT, unwrap_spec=[object, index1_0, object]) @@ -500,7 +501,7 @@ "Stores a value into a fixed field from the object, and fails otherwise" s_class = w_rcvr.class_shadow(interp.space) assert_bounds(n0, 0, s_class.instsize()) - assert_pointers(w_rcvr) + w_rcvr = assert_pointers(w_rcvr) w_rcvr.store(interp.space, n0, w_value) return w_value @@ -513,7 +514,7 @@ @expose_primitive(STORE_STACKP, unwrap_spec=[object, int]) def func(interp, s_frame, w_frame, stackp): assert stackp >= 0 - assert_pointers(w_frame) + w_frame = assert_pointers(w_frame) w_frame.store(interp.space, constants.CTXPART_STACKP_INDEX, interp.space.wrap_int(stackp)) return w_frame @@ -909,7 +910,7 @@ s_cm = w_rcvr.as_compiledmethod_get_shadow(interp.space) w_class = s_cm.w_compiledin if w_class: - assert_pointers(w_class) + w_class = assert_pointers(w_class) 
w_class.as_class_get_shadow(interp.space).flush_method_caches() return w_rcvr @@ -1282,7 +1283,7 @@ # the new BlockContext's home context. Otherwise, the home # context of the receiver is used for the new BlockContext. # Note that in our impl, MethodContext.w_home == self - assert_pointers(w_context) + w_context = assert_pointers(w_context) w_method_context = w_context.as_context_get_shadow(interp.space).w_home() # The block bytecodes are stored inline: so we skip past the @@ -1318,8 +1319,7 @@ interp.space.w_BlockContext): raise PrimitiveFailedError() - assert_pointers(w_block_ctx) - + w_block_ctx = assert_pointers(w_block_ctx) s_block_ctx = w_block_ctx.as_blockcontext_get_shadow(interp.space) exp_arg_cnt = s_block_ctx.expected_argument_count() @@ -1341,7 +1341,7 @@ result_is_new_frame=True) def func(interp, s_frame, w_block_ctx, args_w): - assert_pointers(w_block_ctx) + w_block_ctx = assert_pointers(w_block_ctx) s_block_ctx = w_block_ctx.as_blockcontext_get_shadow(interp.space) exp_arg_cnt = s_block_ctx.expected_argument_count() @@ -1434,7 +1434,7 @@ @expose_primitive(FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - assert_pointers(w_rcvr) + w_rcvr = assert_pointers(w_rcvr) s_class = w_rcvr.as_class_get_shadow(interp.space) s_class.flush_method_caches() return w_rcvr From noreply at buildbot.pypy.org Fri Mar 28 14:02:29 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 14:02:29 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Moved storage-statistics to own module, integrated with storage code and main target. Added test. Message-ID: <20140328130229.606351D29A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r715:0c8b9379f9d5 Date: 2014-03-28 14:01 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/0c8b9379f9d5/ Log: Moved storage-statistics to own module, integrated with storage code and main target. Added test. Added safety-assert to prevent storage- access during bootstrapping phase (specialized storage like ClassShadows cannot be used before the objects have been filled in). Made store_w_superclass and store_w_methoddict of ClassShadow slightly more consistent. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -15,7 +15,7 @@ that create W_PointersObjects of correct size with attached shadows. """ import sys, weakref -from spyvm import constants, error, version +from spyvm import constants, error, version, storage_statistics from spyvm.version import elidable_for_version from rpython.rlib import rrandom, objectmodel, jit, signature @@ -24,7 +24,6 @@ from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin, we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.listsort import TimSort from rsdl import RSDL, RSDL_helper class W_Object(object): @@ -468,6 +467,7 @@ Float).""" _attrs_ = ['w_class'] repr_classname = "W_AbstractObjectWithClassReference" + w_class = None def __init__(self, space, w_class): if w_class is not None: # it's None only for testing and space generation @@ -490,10 +490,12 @@ def guess_classname(self): if self.has_class(): - class_shadow = self.class_shadow(self.w_class.space()) - # Three question marks, because it would be highly irregular to have - # an initialized ClassShadow without an initialized name field. - return class_shadow.name or "???" 
+ if self.w_class.has_shadow(): + class_shadow = self.class_shadow(self.w_class.space()) + return class_shadow.name + else: + # We cannot access the class during the initialization sequence. + return "?? (class not initialized)" else: return "? (no class)" @@ -517,50 +519,12 @@ assert w_class is not None return w_class.as_class_get_shadow(space) -class StatsSorter(TimSort): - def lt(self, a, b): - if a[0] == b[0]: - if a[1] == b[1]: - return a[2] < b[2] - else: - return a[1] < b[1] - else: - return a[0] < b[0] -class StrategyStatistics(object): - # Key: (operation_name, old_strategy, new_strategy) - # Value: [sizes] - stats = {} - do_log = False - do_stats = False - do_stats_sizes = False - - def stat_operation(self, operation_name, old_strategy, new_strategy, size): - key = (operation_name, old_strategy, new_strategy) - if not key in self.stats: - self.stats[key] = [] - self.stats[key].append(size) - def log_operation(self, op, new_strategy_tag, old_strategy_tag, classname, size): - print "%s (%s, was %s) of %s size %d" % (op, new_strategy_tag, old_strategy_tag, classname, size) - def sorted_keys(self): - keys = [ x for x in self.stats ] - StatsSorter(keys).sort() - return keys - def print_stats(self): - for key in self.sorted_keys(): - sizes = self.stats[key] - sum = 0 - for s in sizes: - sum += s - print "%s: %d times, avg size: %d" % (key, len(sizes), sum/len(sizes)) - if self.do_stats_sizes: - print " All sizes: %s" % sizes -strategy_stats = StrategyStatistics() - class W_AbstractPointersObject(W_AbstractObjectWithClassReference): """Common object.""" _attrs_ = ['shadow'] shadow = None repr_classname = "W_AbstractPointersObject" + log_storage = storage_statistics.log @jit.unroll_safe def __init__(self, space, w_class, size): @@ -570,6 +534,7 @@ def initialize_storage(self, space, size): self.store_shadow(self.empty_storage(space, size)) + self.log_storage("Initialized") def fillin(self, space, g_self): W_AbstractObjectWithClassReference.fillin(self, space, g_self) @@ -579,17 +544,28 @@ pointers = g_self.get_pointers() self.store_shadow(self.storage_for_list(space, pointers)) self.store_all(space, pointers) + self.log_storage("Filledin", log_classname=False) def empty_storage(self, space, size): raise NotImplementedError() def storage_for_list(self, space, vars): raise NotImplementedError() + def assert_shadow(self): + # Failing the following assert most likely indicates a bug. The shadow can only be absent during + # the bootstrapping sequence. It will be initialized in the fillin() method. Before that, it should + # not be switched to a specialized shadow, and the space is also not yet available here! Otherwise, + # the specialized shadow will attempt to read information from an uninitialized object. + shadow = self.shadow + assert shadow, "The shadow has not been initialized yet!" + return shadow + def switch_shadow(self, new_shadow): - if self.shadow is not None: - new_shadow.copy_from(self.shadow) + old_shadow = self.assert_shadow() + new_shadow.copy_from(old_shadow) self.store_shadow(new_shadow) new_shadow.attach_shadow() + self.log_storage("Switched", old_shadow) def store_with_new_storage(self, new_storage, n0, w_val): space = self.space() @@ -597,8 +573,7 @@ self.store(space, n0, w_val) def space(self): - assert self.shadow, "Cannot access space without a shadow!" 
- return self.shadow.space + return self.assert_shadow().space def __str__(self): if self.has_shadow() and self.shadow.provides_getname: diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -263,8 +263,8 @@ _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", "_s_methoddict", "_s_superclass", "subclass_s"] - name = '??' - _s_superclass = None + name = '??? (incomplete class info)' + _s_superclass = _s_methoddict = None provides_getname = True repr_classname = "ClassShadow" @@ -277,10 +277,7 @@ if n0 == constants.CLASS_SUPERCLASS_INDEX: self.store_w_superclass(w_val) elif n0 == constants.CLASS_METHODDICT_INDEX: - assert isinstance(w_val, model.W_PointersObject) - if not w_val.is_same_object(self.space.w_nil): - self._s_methoddict = w_val.as_methoddict_get_shadow(self.space) - self._s_methoddict.s_class = self + self.store_w_methoddict(w_val) elif n0 == constants.CLASS_FORMAT_INDEX: # read and painfully decode the format assert isinstance(w_val, model.W_SmallInteger) @@ -340,18 +337,33 @@ self.changed() def store_w_superclass(self, w_class): + superclass = self._s_superclass if w_class is None or w_class.is_same_object(self.space.w_nil): + if superclass: superclass.detach_s_class(self) self._s_superclass = None else: assert isinstance(w_class, model.W_PointersObject) - s_scls = w_class.as_class_get_shadow(self.space) - if self._s_superclass is s_scls: + s_new_superclass = w_class.as_class_get_shadow(self.space) + if superclass is s_new_superclass: return - if self._s_superclass is not None: - self._s_superclass.detach_s_class(self) - self._s_superclass = s_scls - self._s_superclass.attach_s_class(self) + if superclass: superclass.detach_s_class(self) + self._s_superclass = s_new_superclass + s_new_superclass.attach_s_class(self) + def store_w_methoddict(self, w_methoddict): + methoddict = self._s_methoddict + if w_methoddict is None or w_methoddict.is_same_object(self.space.w_nil): + if methoddict: methoddict.s_class = None + self._s_methoddict = None + else: + assert isinstance(w_methoddict, model.W_PointersObject) + s_new_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) + if methoddict is s_new_methoddict: + return + if methoddict: methoddict.s_class = None + self._s_methoddict = s_new_methoddict + self._s_methoddict.s_class = self + def attach_s_class(self, s_other): self.subclass_s[s_other] = None @@ -406,7 +418,7 @@ return self._s_superclass def getname(self): - return self.name or '?' 
+ return self.name # _______________________________________________________________ # Methods for querying the format word, taken from the blue book: diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py new file mode 100644 --- /dev/null +++ b/spyvm/storage_statistics.py @@ -0,0 +1,92 @@ + +from rpython.rlib.listsort import TimSort + +class StatsSorter(TimSort): + """Sort a tuple of 3 strings""" + def lt(self, a, b): + if a[0] == b[0]: + if a[1] == b[1]: + return a[2] < b[2] + else: + return a[1] < b[1] + else: + return a[0] < b[0] + +class StorageStatistics(object): + # Key: (operation_name, old_storage, new_storage) + # Value: [sizes] + stats = {} + + do_log = False + do_stats = False + do_stats_sizes = False + + def log(self, w_obj, operation, old_storage_object, log_classname): + if self.do_log or self.do_stats: + new_storage = w_obj.shadow.repr_classname + if old_storage_object: + old_storage = old_storage_object.repr_classname + else: + old_storage = None + size = w_obj.size() + + key = self.make_key(operation, old_storage, new_storage) + if _stats.do_stats: + self.stat_operation(key, size) + if self.do_log: + if log_classname: + classname = w_obj.guess_classname() + else: + classname = None + self.log_operation(key, size, classname) + + def make_key(self, operation, old_storage, new_storage): + return (operation, old_storage, new_storage) + + def stat_operation(self, key, size): + if not key in self.stats: + self.stats[key] = [] + self.stats[key].append(size) + + def log_operation(self, key, size, classname): + print self.log_operation_string(key, size, classname) + + def key_string(self, key): + if key[1]: + return "%s (%s -> %s)" % (key[0], key[1], key[2]) + else: + return "%s (%s)" % (key[0], key[2]) + + def log_operation_string(self, key, size, classname): + if classname: + return "%s of %s size %d" % (self.key_string(key), classname, size) + else: + return "%s size %d" % (self.key_string(key), size) + + def sorted_keys(self): + keys = [ x for x in self.stats ] + StatsSorter(keys).sort() + return keys + + def print_stats(self): + for key in self.sorted_keys(): + sizes = self.stats[key] + sum = 0 + for s in sizes: sum += s + print "%s: %d times, avg size: %f" % (self.key_string(key), len(sizes), sum/len(sizes)) + if self.do_stats_sizes: + print " All sizes: %s" % sizes + +_stats = StorageStatistics() + +def activate_statistics(log=False, statistics=False, statstics_sizes=False): + _stats.do_log = _stats.do_log or log + _stats.do_stats = _stats.do_stats or statistics + _stats.do_stats_sizes = _stats.do_stats_sizes or statstics_sizes + +def print_statistics(): + if _stats.do_stats: + _stats.print_stats() + +def log(w_obj, operation, old_storage=None, log_classname=True): + _stats.log(w_obj, operation, old_storage, log_classname) diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -369,7 +369,7 @@ def test_primitive_perform_with_args(): from spyvm.test.test_primitives import _prim w_o = space.wrap_list([1, 2, 3]) - w_methoddict = w_o.class_shadow(space)._s_superclass._s_superclass.w_methoddict() + w_methoddict = w_o.class_shadow(space).s_superclass().s_superclass().w_methoddict() w_methoddict.as_methoddict_get_shadow(space).sync_method_cache() selectors_w = w_methoddict.shadow.methoddict.keys() w_sel = None diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -1,5 +1,5 @@ 
import py -from spyvm import wrapper, model, interpreter, shadow +from spyvm import wrapper, model, interpreter, shadow, storage_statistics from spyvm.error import WrapperException, FatalError from .util import read_image, copy_to_module, cleanup_module @@ -176,20 +176,27 @@ assert isinstance(a.shadow, shadow.ListStorageShadow) check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) -def test_statistics(): - stats = model.StrategyStatistics() - stats.stat_operation("B", "old", "new", 3) - stats.stat_operation("B", "old", "new", 4) - stats.stat_operation("B", "old2", "new2", 20) - stats.stat_operation("B", "old", "new", 5) - stats.stat_operation("A", "old", "new", 1) - stats.stat_operation("A", "old", "new", 2) - stats.stat_operation("C", "old", "new", 10) - stats.stat_operation("C", "old", "new", 11) +def test_statistics_stats(): + stats = storage_statistics.StorageStatistics() + stats.stat_operation(stats.make_key("B", "old", "new"), 3) + stats.stat_operation(stats.make_key("B", "old", "new"), 4) + stats.stat_operation(stats.make_key("B", "old2", "new2"), 20) + stats.stat_operation(stats.make_key("B", "old", "new"), 5) + stats.stat_operation(stats.make_key("A", "old", "new"), 1) + stats.stat_operation(stats.make_key("A", "old", "new"), 2) + stats.stat_operation(stats.make_key("C", "old", "new"), 10) + stats.stat_operation(stats.make_key("C", "old", "new"), 11) keys = stats.sorted_keys() assert keys == [ ("A", "old", "new"), ("B", "old", "new"), ("B", "old2", "new2"), ("C", "old", "new") ] assert stats.stats[keys[0]] == [1, 2] assert stats.stats[keys[1]] == [3, 4, 5] assert stats.stats[keys[2]] == [20] assert stats.stats[keys[3]] == [10, 11] + +def test_statistics_log(): + stats = storage_statistics.StorageStatistics() + s = stats.log_operation_string(stats.make_key("Operation", "old_storage", "new_storage"), 22, "classname") + assert s == "Operation (old_storage -> new_storage) of classname size 22" + s = stats.log_operation_string(stats.make_key("InitialOperation", None, "some_new_storage"), 40, "a_classname") + assert s == "InitialOperation (some_new_storage) of a_classname size 40" \ No newline at end of file diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,7 +6,7 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow + error, shadow, storage_statistics from spyvm.tool.analyseimage import create_image from spyvm.interpreter_proxy import VirtualMachine @@ -185,12 +185,11 @@ as_benchmark = True idx += 1 elif arg == "--strategy-log": - model.strategy_stats.do_log = True + storage_statistics.activate_statistics(log=True) elif arg == "--strategy-stats": - model.strategy_stats.do_stats = True + storage_statistics.activate_statistics(statistics=True) elif arg == "--strategy-stats-with-sizes": - model.strategy_stats.do_stats = True - model.strategy_stats.do_stats_sizes = True + storage_statistics.activate_statistics(statistics=True, statstics_sizes=True) elif path is None: path = argv[idx] else: @@ -224,8 +223,7 @@ else: _run_image(interp) result = 0 - if model.strategy_stats.do_stats: - model.strategy_stats.print_stats() + storage_statistics.print_statistics() return result From noreply at buildbot.pypy.org Fri Mar 28 14:29:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Mar 2014 14:29:19 +0100 (CET) Subject: [pypy-commit] benchmarks default: Remove even more indirection around abstract_threading. 
This version should create exactly Message-ID: <20140328132919.79D091D29DB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r242:1b4cc089a6e7 Date: 2014-03-28 14:29 +0100 http://bitbucket.org/pypy/benchmarks/changeset/1b4cc089a6e7/ Log: Remove even more indirection around abstract_threading. This version should create exactly getsegmentlimit() threads. diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -3,66 +3,40 @@ import thread try: - from __pypy__.thread import atomic + from __pypy__.thread import atomic, getsegmentlimit except ImportError: atomic = Lock() + def getsegmentlimit(): + return 1 + class Worker(Thread): """Thread executing tasks from a given tasks queue""" def __init__(self, queue): Thread.__init__(self) self.daemon = True - self.next_task = None - self.cond = Condition() self.queue = queue self.start() def run(self): - # the next line registers the at_commit_cb on interpreter - # level for this thread. This should be fixed in the - # interpreter (it causes a conflict in stmgcintf.register_at_commit_cb). - # thread.at_commit(lambda : 0, ()) - while True: - with self.cond: - while self.next_task is None: - self.cond.wait() - - func, args, kargs = self.next_task - self.next_task = None - - try: - func(*args, **kargs) - except Exception as e: - print e - - # first time put in queue by threadpool on creation + func, args, kwds = self.queue.get() try: - self.queue.put_nowait(self) - except Full: - # thread limit reached, I'll show myself out.. - return + func(*args, **kwds) + except Exception as e: + print e class ThreadPool(object): - def __init__(self, thread_queue_size=12): - self.threads = Queue(thread_queue_size) + def __init__(self): + self.input_queue = Queue() + for n in range(getsegmentlimit()): + Worker(self.input_queue) - def add_task(self, func, *args, **kargs): - try: - worker = self.threads.get_nowait() - except Empty: - worker = Worker(self.threads) + def add_task(self, func, *args, **kwds): + self.input_queue.put((func, args, kwds)) - with worker.cond: - worker.next_task = (func, args, kargs) - worker.cond.notify_all() - - - - -import multiprocessing -_thread_pool = ThreadPool(1.5 * multiprocessing.cpu_count()) +_thread_pool = ThreadPool() diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -125,7 +125,6 @@ -tasks = 0 def task(x, h, cameraPos, objs, lightSource): with atomic: for y in range(h): @@ -133,20 +132,8 @@ (Vector(x/50.0-5,y/50.0-5,0)-cameraPos).normal()) trace(ray, objs, lightSource, 10) - global tasks - with atomic: - tasks -= 1 - futures = [] def future_dispatcher(ths, *args): - global tasks - - while tasks >= ths: - time.sleep(0) - - with atomic: - tasks += 1 - futures.append(Future(task, *args)) @@ -167,13 +154,11 @@ cameraPos = Vector(0,0,20) for x in range(w): - print x future_dispatcher(ths, x, h, cameraPos, objs, lightSource) for f in futures: f() del futures[:] - assert tasks == 0 if __name__ == '__main__': From noreply at buildbot.pypy.org Fri Mar 28 15:26:25 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 28 Mar 2014 15:26:25 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: compile test_capi once for a version of pypy and the c file Message-ID: <20140328142625.16C851D29DC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: 
r70315:8154d15d1925 Date: 2014-03-28 17:17 +0300 http://bitbucket.org/pypy/pypy/changeset/8154d15d1925/ Log: compile test_capi once for a version of pypy and the c file diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -7,12 +7,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. """ thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,30 @@ +import sys, tempfile, imp, binascii, os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: + +def get_hashed_dir(cfile): + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], cfile]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + +cfile = '_testcapimodule.c' +output_dir = get_hashed_dir(cfile) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + imp.load_module('_testcapi', fp, filename, description) +except ImportError: import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) From noreply at buildbot.pypy.org Fri Mar 28 15:26:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 28 Mar 2014 15:26:26 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: Backed out changeset: 2a672c888cae Message-ID: <20140328142626.705FE1D29DC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70316:395b0545a649 Date: 2014-03-28 17:19 +0300 http://bitbucket.org/pypy/pypy/changeset/395b0545a649/ Log: Backed out changeset: 2a672c888cae diff --git a/pypy/goal/python27.lib b/pypy/goal/python27.lib deleted file mode 100644 Binary file pypy/goal/python27.lib has changed From noreply at buildbot.pypy.org Fri Mar 28 15:26:27 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 28 Mar 2014 15:26:27 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: merge heads Message-ID: <20140328142627.B23601D29DC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70317:d18e09680f30 Date: 2014-03-28 17:19 +0300 http://bitbucket.org/pypy/pypy/changeset/d18e09680f30/ Log: merge heads diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -7,12 +7,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. 
""" thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,30 @@ +import sys, tempfile, imp, binascii, os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: + +def get_hashed_dir(cfile): + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], cfile]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + +cfile = '_testcapimodule.c' +output_dir = get_hashed_dir(cfile) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + imp.load_module('_testcapi', fp, filename, description) +except ImportError: import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) From noreply at buildbot.pypy.org Fri Mar 28 15:26:29 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 28 Mar 2014 15:26:29 +0100 (CET) Subject: [pypy-commit] pypy win32-fixes4: ignore build artifact Message-ID: <20140328142629.0617A1D29DC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70318:17c1d8d19dd5 Date: 2014-03-28 17:22 +0300 http://bitbucket.org/pypy/pypy/changeset/17c1d8d19dd5/ Log: ignore build artifact diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -64,6 +64,7 @@ ^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ +^pypy/goal/.+\.lib$ ^pypy/_cache$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ From noreply at buildbot.pypy.org Fri Mar 28 17:49:54 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 17:49:54 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Fixed test. Message-ID: <20140328164954.307FF1D2575@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r718:478cf6a974a4 Date: 2014-03-28 17:49 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/478cf6a974a4/ Log: Fixed test. diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -68,8 +68,8 @@ w('ensure'), space.w_BlockClosure]) # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method_shadow(chr(0x7c)).create_frame(space, w(0), []) - w_frame = s_method.create_frame(space, w(0), [], sender=s_initial_frame).w_self() + s_initial_frame = create_method_shadow(chr(0x7c)).create_frame(w(0), []) + w_frame = s_method.create_frame(w(0), [], sender=s_initial_frame).w_self() try: interp.loop(w_frame) From noreply at buildbot.pypy.org Fri Mar 28 17:49:51 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 17:49:51 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Added is_nil(space) as convenience method to test for nil. 
Message-ID: <20140328164951.CAFD01D2575@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r716:db7a0bfbcbee Date: 2014-03-28 17:33 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/db7a0bfbcbee/ Log: Added is_nil(space) as convenience method to test for nil. Mainly to make different comparisons consistent (== vs is vs is_same_object). Also made comparisons with w_true/w_false consistent (not often enough for special method). Fixed test_interpreter.py to only put W_Object instances into other W_Objects. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -197,7 +197,7 @@ if not self.next_wakeup_tick == 0 and now >= self.next_wakeup_tick: self.next_wakeup_tick = 0 semaphore = self.space.objtable["w_timerSemaphore"] - if not semaphore.is_same_object(self.space.w_nil): + if not semaphore.is_nil(self.space): wrapper.SemaphoreWrapper(self.space, semaphore).signal(s_frame.w_self()) # We have no finalization process, so far. # We do not support external semaphores. @@ -445,7 +445,7 @@ def activate_unwind_context(self, interp): # the first temp is executed flag for both #ensure: and #ifCurtailed: - if self.gettemp(1) is self.space.w_nil: + if self.gettemp(1).is_nil(self.space): self.settemp(1, self.space.w_true) # mark unwound self.push(self.gettemp(0)) # push the first argument try: diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -380,9 +380,9 @@ @expose_on_virtual_machine_proxy([oop], bool) def booleanValueOf(w_object): space = IProxy.space - if w_object is space.w_true: + if space.w_true.is_same_object(w_object): return True - if w_object is space.w_false: + if space.w_false.is_same_object(w_object): return False raise ProxyFunctionFailed diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -123,6 +123,10 @@ SmallIntegers and Floats need a different implementation.""" return self is other + def is_nil(self, space): + """Return True, if the receiver represents the nil object in the given Object Space.""" + return self.is_same_object(space.w_nil) + def become(self, other): """Become swaps two objects. 
False means swapping failed""" diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -38,7 +38,7 @@ def intOrIfNil(space, w_int, i): - if w_int is space.w_nil: + if w_int.is_nil(space): return i elif isinstance(w_int, model.W_Float): return intmask(int(space.unwrap_float(w_int))) @@ -68,7 +68,7 @@ return s_form def loadHalftone(self, w_halftone_form): - if w_halftone_form is self.space.w_nil: + if w_halftone_form.is_nil(self.space): return None elif isinstance(w_halftone_form, model.W_WordsObject): # Already a bitmap @@ -94,7 +94,7 @@ self.w_destForm = self.fetch(0) self.dest = self.loadForm(self.w_destForm) self.w_sourceForm = self.fetch(1) - if self.w_sourceForm is not self.space.w_nil: + if not self.w_sourceForm.is_nil(self.space): self.source = self.loadForm(self.w_sourceForm) else: self.source = None @@ -739,7 +739,7 @@ if self.size() < 5: return self.w_bits = self.fetch(0) - if self.w_bits is self.space.w_nil: + if self.w_bits.is_nil(self.space): return if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): return @@ -755,7 +755,7 @@ return w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) - if not w_offset is self.space.w_nil: + if not w_offset.is_nil(self.space): self.offsetX = self.intOrIfNil(w_offset.fetch(self.space, 0), 0) self.offsetY = self.intOrIfNil(w_offset.fetch(self.space, 1), 0) self.pixPerWord = 32 / self.depth diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1157,7 +1157,7 @@ @expose_primitive(FILE_OPEN, unwrap_spec=[object, str, object]) def func(interp, s_frame, w_rcvr, filename, w_writeable_flag): - if w_writeable_flag is interp.space.w_true: + if w_writeable_flag.is_same_object(interp.space.w_true): mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC else: mode = os.O_RDONLY diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -83,7 +83,7 @@ return find_storage_for_objects(self.space, [w_val]) @staticmethod def static_can_contain(space, w_val): - return w_val == space.w_nil + return isinstance(w_val, model.W_Object) and w_val.is_nil(space) class AbstractValueOrNilStorageMixin(object): # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class @@ -108,7 +108,7 @@ return self.wrap(self.space, val) def do_store(self, n0, w_val): - if w_val == self.space.w_nil: + if w_val.is_nil(self.space): self.storage[n0] = self.nil_value else: self.storage[n0] = self.unwrap(self.space, w_val) @@ -116,7 +116,7 @@ # This is to avoid code duplication @objectmodel.specialize.arg(0) def _value_or_nil_can_handle(cls, space, w_val): - return w_val == space.w_nil or \ + return isinstance(w_val, model.W_Object) and w_val.is_nil(space) or \ (isinstance(w_val, cls.wrapper_class) \ and not cls.is_nil_value(cls.unwrap(space, w_val))) @@ -338,7 +338,7 @@ def store_w_superclass(self, w_class): superclass = self._s_superclass - if w_class is None or w_class.is_same_object(self.space.w_nil): + if w_class is None or w_class.is_nil(self.space): if superclass: superclass.detach_s_class(self) self._s_superclass = None else: @@ -352,7 +352,7 @@ def store_w_methoddict(self, w_methoddict): methoddict = self._s_methoddict - if w_methoddict is None or w_methoddict.is_same_object(self.space.w_nil): + if w_methoddict is None or w_methoddict.is_nil(self.space): if methoddict: methoddict.s_class = None self._s_methoddict = None else: @@ -554,7 +554,7 @@ 
self.methoddict = {} for i in range(size): w_selector = self.w_self().fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) - if not w_selector.is_same_object(self.space.w_nil): + if not w_selector.is_nil(self.space): if not isinstance(w_selector, model.W_BytesObject): pass # TODO: Check if there's more assumptions about this. @@ -713,7 +713,7 @@ def store_w_sender(self, w_sender): assert isinstance(w_sender, model.W_PointersObject) - if w_sender.is_same_object(self.space.w_nil): + if w_sender.is_nil(self.space): self._s_sender = None else: self.store_s_sender(w_sender.as_context_get_shadow(self.space)) @@ -727,7 +727,7 @@ return self._s_sender def store_unwrap_pc(self, w_pc): - if w_pc.is_same_object(self.space.w_nil): + if w_pc.is_nil(self.space): self.store_pc(-1) else: pc = self.space.unwrap_int(w_pc) @@ -762,7 +762,7 @@ assert self == e.s_context def is_returned(self): - return self.pc() == -1 and self.w_sender is self.space.w_nil + return self.pc() == -1 and self.w_sender.is_nil(self.space) # ______________________________________________________________________ # Method that contains the bytecode for this method/block context @@ -1147,7 +1147,7 @@ return self.size() - self.tempsize() def is_closure_context(self): - return self.w_closure_or_nil is not self.space.w_nil + return not self.w_closure_or_nil.is_nil(self.space) def __str__(self): retval = '\nMethodContext of:' diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -299,7 +299,7 @@ if self.special_object(so_index).w_object is None: self.special_object(so_index).w_object = w_object else: - if self.special_object(0).w_object is not self.space.w_nil: + if not self.special_object(0).w_object.is_nil(self.space): raise Warning('Object found in multiple places in the special objects array') def special_object(self, index): @@ -382,7 +382,7 @@ def run_spy_hacks(self, space): pass # w_display = space.objtable["w_display"] - # if w_display is not None and w_display is not space.w_nil: + # if w_display is not None and not w_display.is_nil(space): # if space.unwrap_int(w_display.fetch(space, 3)) < 8: # # non-native indexed color depth not well supported # w_display.store(space, 3, space.wrap_int(8)) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -6,6 +6,7 @@ def setup_module(): space, interp = create_space_interp(bootstrap = True) + w = space.w copy_to_module(locals(), __name__) def teardown_module(): @@ -26,6 +27,15 @@ nlr.s_target_context.push(nlr.value) return nlr.s_target_context.w_self() +def assert_list(list, expected): + for i in range(len(list)): + exp = expected[i] + if isinstance(exp, str): + assert exp == list[i].as_string() + if not isinstance(exp, model.W_Object): + exp = w(exp) + assert list[i].is_same_object(exp) + # expose the bytecode's values as global constants. # Bytecodes that have a whole range are exposed as global functions: # call them with an argument 'n' to get the bytecode number 'base + n'. 
@@ -103,7 +113,7 @@ w_method.setliterals([model.W_PointersObject(space, None, 2)]) if receiver is None: receiver = space.w_nil - s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, receiver, ["foo", "bar"]) + s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, receiver, [space.w("foo"), space.w("bar")]) return s_frame.w_self(), s_frame def new_frame(bytes, receiver=None): @@ -117,31 +127,31 @@ w_method.islarge = 1 w_method.argsize=2 w_method.tempsize=8 - s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, "receiver", ["foo", "bar"]) + s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, w("receiver"), [w("foo"), w("bar")]) w_frame = s_frame.w_self() - assert s_frame.w_receiver() == "receiver" - assert s_frame.gettemp(0) == "foo" - assert s_frame.gettemp(1) == "bar" - assert s_frame.gettemp(2) is space.w_nil - s_frame.settemp(2, "spam") - assert s_frame.gettemp(2) == "spam" + assert s_frame.w_receiver().as_string() == "receiver" + assert s_frame.gettemp(0).as_string() == "foo" + assert s_frame.gettemp(1).as_string() == "bar" + assert s_frame.gettemp(2).is_nil(space) + s_frame.settemp(2, w("spam")) + assert s_frame.gettemp(2).as_string() == "spam" assert s_frame.getbytecode() == ord("h") assert s_frame.getbytecode() == ord("e") assert s_frame.getbytecode() == ord("l") def test_push_pop(): _, frame = new_frame("") - frame.push(12) - frame.push(34) - frame.push(56) - assert frame.peek(2) == 12 - assert frame.pop() == 56 - assert frame.top() == 34 + frame.push(w(12)) + frame.push(w(34)) + frame.push(w(56)) + assert frame.peek(2).value == 12 + assert frame.pop().value == 56 + assert frame.top().value == 34 frame.pop_n(0) - assert frame.top() == 34 - frame.push(56) + assert frame.top().value == 34 + frame.push(w(56)) frame.pop_n(2) - assert frame.top() == 12 + assert frame.top().value == 12 def test_unknownBytecode(): w_frame, s_frame = new_frame(unknownBytecode) @@ -158,26 +168,26 @@ pushReceiverVariableBytecode(1) + pushReceiverVariableBytecode(2))): w_demo = bootstrap_class(3).as_class_get_shadow(space).new() - w_demo.store(space, 0, "egg") - w_demo.store(space, 1, "bar") - w_demo.store(space, 2, "baz") + w_demo.store(space, 0, w("egg")) + w_demo.store(space, 1, w("bar")) + w_demo.store(space, 2, w("baz")) w_frame, s_frame = new_frame(bytecode, receiver = w_demo) s_frame = w_frame.as_context_get_shadow(space) step_in_interp(s_frame) step_in_interp(s_frame) step_in_interp(s_frame) - assert s_frame.stack() == ["egg", "bar", "baz"] + assert_list(s_frame.stack(), ["egg", "bar", "baz"]) def test_pushTemporaryVariableBytecode(bytecode=(pushTemporaryVariableBytecode(0) + pushTemporaryVariableBytecode(1) + pushTemporaryVariableBytecode(2))): w_frame, s_frame = new_frame(bytecode) s_frame = w_frame.as_context_get_shadow(space) - s_frame.settemp(2, "temp") + s_frame.settemp(2, w("temp")) step_in_interp(s_frame) step_in_interp(s_frame) step_in_interp(s_frame) - assert s_frame.stack() == ["foo", "bar", "temp"] + assert_list(s_frame.stack(), ["foo", "bar", "temp"]) def test_pushLiteralConstantBytecode(bytecode=pushLiteralConstantBytecode(0) + pushLiteralConstantBytecode(1) + @@ -187,18 +197,16 @@ step_in_interp(s_frame) step_in_interp(s_frame) step_in_interp(s_frame) - assert s_frame.stack() == [fakesymbol("a"), - fakesymbol("b"), - fakesymbol("c")] + assert_list(s_frame.stack(), [fakesymbol("a"), fakesymbol("b"), fakesymbol("c")]) def test_pushLiteralVariableBytecode(bytecode=pushLiteralVariableBytecode(0)): 
w_association = bootstrap_class(2).as_class_get_shadow(space).new() - w_association.store(space, 0, "mykey") - w_association.store(space, 1, "myvalue") + w_association.store(space, 0, w("mykey")) + w_association.store(space, 1, w("myvalue")) w_frame, s_frame = new_frame(bytecode) s_frame.w_method().setliterals( fakeliterals(space, w_association)) step_in_interp(s_frame) - assert s_frame.stack() == ["myvalue"] + assert_list(s_frame.stack(), ["myvalue"]) def test_storeAndPopReceiverVariableBytecode(bytecode=storeAndPopReceiverVariableBytecode, popped=True): @@ -212,13 +220,13 @@ if popped: assert s_frame.stack() == [] else: - assert s_frame.stack() == [space.w_true] + assert_list(s_frame.stack(), [space.w_true]) for test_index in range(8): if test_index == index: assert w_object.fetch(space, test_index).is_same_object(space.w_true) else: - assert w_object.fetch(space, test_index) is space.w_nil + assert w_object.fetch(space, test_index).is_nil(space) def test_storeAndPopTemporaryVariableBytecode(bytecode=storeAndPopTemporaryVariableBytecode): for index in range(8): @@ -230,9 +238,9 @@ for test_index in range(8): print w_frame.fetch_all(s_frame.space) if test_index == index: - assert s_frame.gettemp(test_index) == space.w_true + assert s_frame.gettemp(test_index).is_same_object(space.w_true) else: - assert s_frame.gettemp(test_index) != space.w_true + assert not s_frame.gettemp(test_index).is_same_object(space.w_true) def test_pushConstantTrueBytecode(): w_frame, s_frame = new_frame(pushConstantTrueBytecode) @@ -249,7 +257,7 @@ def test_pushConstantNilBytecode(): w_frame, s_frame = new_frame(pushConstantNilBytecode) step_in_interp(s_frame) - assert s_frame.pop().is_same_object(space.w_nil) + assert s_frame.pop().is_nil(space) assert s_frame.stack() == [] def test_pushConstantMinusOneBytecode(): @@ -279,14 +287,14 @@ def test_pushActiveContextBytecode(): w_frame, s_frame = new_frame(pushActiveContextBytecode) step_in_interp(s_frame) - assert s_frame.pop() == w_frame + assert s_frame.pop().is_same_object(w_frame) assert s_frame.stack() == [] def test_duplicateTopBytecode(): w_frame, s_frame = new_frame(pushConstantZeroBytecode + duplicateTopBytecode) step_in_interp(s_frame) step_in_interp(s_frame) - assert s_frame.stack() == [space.w_zero, space.w_zero] + assert_list(s_frame.stack(), [space.w_zero, space.w_zero]) def test_bytecodePrimBitAnd(): w_frame, s_frame = new_frame(pushConstantOneBytecode + pushConstantTwoBytecode + bytecodePrimBitAnd) @@ -364,14 +372,14 @@ step_in_interp(s_frame) step_in_interp(s_frame) step_in_interp(s_frame) - assert s_frame.pop() == space.w_false + assert s_frame.pop().is_same_object(space.w_false) assert s_frame.stack() == [] w_frame, s_frame = new_frame(pushConstantOneBytecode + pushConstantOneBytecode + bytecodePrimEquivalent) step_in_interp(s_frame) step_in_interp(s_frame) step_in_interp(s_frame) - assert s_frame.pop() == space.w_true + assert s_frame.pop().is_same_object(space.w_true) assert s_frame.stack() == [] def test_bytecodePrimNew(): @@ -447,7 +455,7 @@ w_active_context = step_in_interp(s_active_context) s_active_context = w_active_context.as_context_get_shadow(space) assert w_active_context == w_frame - assert s_active_context.stack() == [result] + assert_list(s_active_context.stack(), [result]) def test_sendLiteralSelectorBytecode(): w_class = bootstrap_class(0) @@ -554,7 +562,7 @@ w_frame, s_frame = new_frame(pushConstantTrueBytecode + popStackBytecode) step_in_interp(s_frame) - assert s_frame.stack() == [space.w_true] + assert_list(s_frame.stack(), 
[space.w_true]) step_in_interp(s_frame) assert s_frame.stack() == [] @@ -575,8 +583,8 @@ def storeAssociation(bytecode): w_association = bootstrap_class(2).as_class_get_shadow(space).new() - w_association.store(space, 0, "mykey") - w_association.store(space, 1, "myvalue") + w_association.store(space, 0, w("mykey")) + w_association.store(space, 1, w("myvalue")) w_frame, s_frame = new_frame(pushConstantOneBytecode + bytecode) s_frame.w_method().setliterals(fakeliterals(space, w_association)) step_in_interp(s_frame) @@ -627,9 +635,9 @@ s_frame.push(space.w_one) s_frame.push(space.w_two) step_in_interp(s_frame) - assert s_frame.stack() == [space.w_true, space.w_false, + assert_list(s_frame.stack(), [space.w_true, space.w_false, space.w_true, space.w_false, - space.w_false, space.w_true] + space.w_false, space.w_true]) def test_singleExtendedSendBytecode(): w_class = bootstrap_class(0) @@ -760,7 +768,7 @@ # ^ [ self ] value assert interpret_bc( [ 137, 117, 200, 164, 2, 112, 125, 201, 124 ], - fakeliterals(space, space.wrap_int(3))) is space.w_nil + fakeliterals(space, space.wrap_int(3))).is_nil(space) def test_bc_value_return(): # valueReturn @@ -877,52 +885,52 @@ # Closure Bytecodes def test_bc_pushNewArrayBytecode(bytecode=pushNewArrayBytecode): w_frame, s_frame = new_frame(bytecode + chr(0x83)) - s_frame.push(fakeliterals(space, "egg")) - s_frame.push(fakeliterals(space, "bar")) - s_frame.push(fakeliterals(space, "baz")) + s_frame.push(w(fakeliterals(space, "egg"))) + s_frame.push(w(fakeliterals(space, "bar"))) + s_frame.push(w(fakeliterals(space, "baz"))) step_in_interp(s_frame) array = s_frame.pop() - assert array.at0(space, 0) == fakeliterals(space, "egg") - assert array.at0(space, 1) == fakeliterals(space, "bar") - assert array.at0(space, 2) == fakeliterals(space, "baz") + assert space.unwrap_array(array.at0(space, 0)) == fakeliterals(space, "egg") + assert space.unwrap_array(array.at0(space, 1)) == fakeliterals(space, "bar") + assert space.unwrap_array(array.at0(space, 2)) == fakeliterals(space, "baz") def test_bc_pushNewArray(bytecode=pushNewArrayBytecode): w_frame, s_frame = new_frame(bytecode + chr(0x07)) step_in_interp(s_frame) array = s_frame.pop() assert array.size() == 7 - assert array.at0(space, 0) == space.w_nil + assert array.at0(space, 0).is_nil(space) def test_bc_pushRemoteTempLongBytecode(bytecode = pushRemoteTempLongBytecode): w_frame, s_frame = new_frame(bytecode + chr(0) + chr(0)) s_frame.settemp(0, space.w_Array.as_class_get_shadow(interp.space).new(2)) step_in_interp(s_frame) - assert s_frame.top() == space.w_nil + assert s_frame.top().is_nil(space) def setupTempArrayAndContext(bytecode): # both indizes are 0-relative w_frame, s_frame = new_frame(bytecode + chr(2) + chr(1)) - s_frame.push(fakeliterals(space, "english")) - s_frame.push(fakeliterals(space, "bar")) + s_frame.push(w(fakeliterals(space, "english"))) + s_frame.push(w(fakeliterals(space, "bar"))) temp_array = space.w_Array.as_class_get_shadow(interp.space).new(3) - temp_array.atput0(space, 2, fakeliterals(space, "pub")) + temp_array.atput0(space, 2, w(fakeliterals(space, "pub"))) s_frame.settemp(1, temp_array) step_in_interp(s_frame) return s_frame, temp_array def test_bc_pushRemoteTempLongBytecode2(bytecode = pushRemoteTempLongBytecode): context, _ = setupTempArrayAndContext(bytecode) - assert context.top() == fakeliterals(space, "pub") + assert space.unwrap_array(context.top()) == fakeliterals(space, "pub") def test_bc_storeRemoteTempLongBytecode(bytecode = storeRemoteTempLongBytecode): context, temp_array 
= setupTempArrayAndContext(bytecode) - assert context.top() == fakeliterals(space, "bar") - assert temp_array.at0(space, 2) == fakeliterals(space, "bar") + assert space.unwrap_array(context.top()) == fakeliterals(space, "bar") + assert space.unwrap_array(temp_array.at0(space, 2)) == fakeliterals(space, "bar") def test_bc_storeAndPopRemoteTempLongBytecode(bytecode = storeAndPopRemoteTempLongBytecode): context, temp_array = setupTempArrayAndContext(bytecode) - assert temp_array.at0(space, 2) == fakeliterals(space, "bar") - assert context.top() == fakeliterals(space, "english") + assert space.unwrap_array(temp_array.at0(space, 2)) == fakeliterals(space, "bar") + assert space.unwrap_array(context.top()) == fakeliterals(space, "english") def test_bc_pushClosureCopyCopied0ValuesBytecode(bytecode = pushClosureCopyCopiedValuesBytecode): for i in (0, 0xF0, 0x0FF0, 0xFFF0): @@ -937,16 +945,16 @@ def test_bc_pushClosureCopyCopied2ValuesBytecode(bytecode = pushClosureCopyCopiedValuesBytecode): w_frame, s_frame = new_frame(bytecode + chr(0x23) + chr(0) + chr(0)) - s_frame.push("english") - s_frame.push("bar") + s_frame.push(w("english")) + s_frame.push(w("bar")) pc = s_frame.pc() step_in_interp(s_frame) assert s_frame.pc() == pc + 4 closure = wrapper.BlockClosureWrapper(space, s_frame.top()) assert closure.startpc() == pc + 4 + 5 assert closure.outerContext() is s_frame._w_self - assert closure.at0(0) == "english" - assert closure.at0(1) == "bar" + assert closure.at0(0).as_string() == "english" + assert closure.at0(1).as_string() == "bar" def test_blockclosure_valuevalue(): #someTest diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -34,7 +34,7 @@ w_myinstance = w_mycls.as_class_get_shadow(space).new() assert isinstance(w_myinstance, model.W_PointersObject) assert w_myinstance.getclass(space).is_same_object(w_mycls) - assert w_myinstance.fetch(space, 0) is space.w_nil + assert w_myinstance.fetch(space, 0).is_nil(space) py.test.raises(IndexError, lambda: w_myinstance.fetch(space, 3)) w_myinstance.store(space, 1, w_myinstance) assert w_myinstance.fetch(space, 1) is w_myinstance @@ -416,4 +416,4 @@ # When executed using pypy, del is not immediately executed. # Thus the reference may linger until the next gc... 
import gc; gc.collect() - assert weak_object.fetch(space, 0) is space.w_nil + assert weak_object.fetch(space, 0).is_nil(space) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -328,7 +328,7 @@ w_q = space.w_Character.as_class_get_shadow(space).new() vidx = constants.CHARACTER_VALUE_INDEX+1 ordq = ord("q") - assert prim(primitives.INST_VAR_AT, [w_q, vidx]) == space.w_nil + assert prim(primitives.INST_VAR_AT, [w_q, vidx]).is_nil(space) assert prim(primitives.INST_VAR_AT_PUT, [w_q, vidx, ordq]).value == ordq assert prim(primitives.INST_VAR_AT, [w_q, vidx]).value == ordq @@ -361,7 +361,7 @@ (primitives.PUSH_TWO, space.w_two), ]: assert prim(code, [space.w_nil]).is_same_object(const) - assert prim(primitives.PUSH_SELF, [space.w_nil]).is_same_object(space.w_nil) + assert prim(primitives.PUSH_SELF, [space.w_nil]).is_nil(space) assert prim(primitives.PUSH_SELF, ["a"]) is wrap("a") def test_boolean(): @@ -451,7 +451,7 @@ def test_interrupt_semaphore(): prim(primitives.INTERRUPT_SEMAPHORE, [1, space.w_true]) - assert space.objtable["w_interrupt_semaphore"] is space.w_nil + assert space.objtable["w_interrupt_semaphore"].is_nil(space) class SemaphoreInst(model.W_Object): def getclass(self, space): @@ -485,7 +485,7 @@ w_method = prim(primitives.NEW_METHOD, [space.w_CompiledMethod, len(bytecode), 1025]) assert w_method.literalat0(space, 0).value == 1025 assert w_method.literalsize == 2 - assert w_method.literalat0(space, 1).is_same_object(space.w_nil) + assert w_method.literalat0(space, 1).is_nil(space) assert w_method.bytes == ["\x00"] * len(bytecode) def test_image_name(): @@ -567,7 +567,7 @@ w_outer_frame, s_initial_context = new_frame("") w_block = prim(primitives.CLOSURE_COPY_WITH_COPIED_VALUES, map(wrap, [w_outer_frame, 2, [wrap(1), wrap(2)]]), w_frame) - assert w_block is not space.w_nil + assert not w_block.is_nil(space) w_w_block = wrapper.BlockClosureWrapper(space, w_block) assert w_w_block.startpc() is 5 assert w_w_block.at0(0) == wrap(1) @@ -604,7 +604,7 @@ assert s_new_context.w_closure_or_nil is closure assert s_new_context.s_sender() is s_initial_context - assert s_new_context.w_receiver() is space.w_nil + assert s_new_context.w_receiver().is_nil(space) def test_primitive_closure_value_value(): s_initial_context, closure, s_new_context = build_up_closure_environment([ @@ -612,7 +612,7 @@ assert s_new_context.w_closure_or_nil is closure assert s_new_context.s_sender() is s_initial_context - assert s_new_context.w_receiver() is space.w_nil + assert s_new_context.w_receiver().is_nil(space) assert s_new_context.gettemp(0).as_string() == "first arg" assert s_new_context.gettemp(1).as_string() == "second arg" @@ -623,7 +623,7 @@ assert s_new_context.w_closure_or_nil is closure assert s_new_context.s_sender() is s_initial_context - assert s_new_context.w_receiver() is space.w_nil + assert s_new_context.w_receiver().is_nil(space) assert s_new_context.gettemp(0).as_string() == "first arg" assert s_new_context.gettemp(1).as_string() == "second arg" assert s_new_context.gettemp(2).as_string() == "some value" diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -171,7 +171,7 @@ def assert_contains_nils(w_obj): for i in range(w_obj.size()): - assert space.w_nil == w_obj.fetch(space, i) + assert w_obj.fetch(space, i).is_nil(space) def test_attach_mc(): w_m = create_method() @@ -257,7 +257,7 @@ 
s_methoddict.sync_method_cache() i = 0 key = s_methoddict.w_self().fetch(s_methoddict.space, constants.METHODDICT_NAMES_INDEX+i) - while key is space.w_nil: + while key.is_nil(space): i = i + 1 key = s_methoddict.w_self().fetch(s_methoddict.space, constants.METHODDICT_NAMES_INDEX+i) @@ -295,9 +295,9 @@ def test_returned_contexts_pc(): w_context = methodcontext() s_context = w_context.as_methodcontext_get_shadow(space) - assert w_context.fetch(space, constants.CTXPART_PC_INDEX) is not space.w_nil + assert not w_context.fetch(space, constants.CTXPART_PC_INDEX).is_nil(space) s_context.mark_returned() - assert w_context.fetch(space, constants.CTXPART_PC_INDEX) is space.w_nil + assert w_context.fetch(space, constants.CTXPART_PC_INDEX).is_nil(space) def test_methodcontext_s_home(): w_context = methodcontext() diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -34,7 +34,7 @@ for i in range(arr.size()): w_val = arr.fetch(space, i) if expected[i] == w_nil: - assert w_val == w_nil + assert w_val.is_nil(space) elif isinstance(expected[i], int): assert isinstance(w_val, model.W_SmallInteger) assert space.unwrap_int(w_val) == expected[i] @@ -79,7 +79,7 @@ def test_List_fetch(): a = list_arr(5) assert a.fetch(space, 0).getclass(space) == class_Array - assert a.fetch(space, 4) == w_nil + assert a.fetch(space, 4).is_nil(space) def test_List_size(): a = list_arr(5) diff --git a/spyvm/test/test_wrapper.py b/spyvm/test/test_wrapper.py --- a/spyvm/test/test_wrapper.py +++ b/spyvm/test/test_wrapper.py @@ -97,7 +97,7 @@ w_last = space.w_nil for w_process in processes_w[::-1]: w_first = newprocess(w_first, w_processlist)._w_self - if w_last is space.w_nil: + if w_last.is_nil(space): w_last = w_first pl = wrapper.ProcessListWrapper(space, w_processlist) pl.store_first_link(w_first) @@ -155,8 +155,8 @@ w_frame = process.suspend(space.w_true) process_list = wrapper.scheduler(space).get_process_list(process.priority()) assert process_list.first_link() is process_list.last_link() - assert process_list.first_link() is space.w_nil - assert process.my_list() is space.w_nil + assert process_list.first_link().is_nil(space) + assert process.my_list().is_nil(space) def test_suspend_active(self): suspended_context = new_frame() @@ -166,8 +166,8 @@ old_process.suspend(current_context) process_list = wrapper.scheduler(space).get_process_list(old_process.priority()) assert process_list.first_link() is process_list.last_link() - assert process_list.first_link() is space.w_nil - assert old_process.my_list() is space.w_nil + assert process_list.first_link().is_nil(space) + assert old_process.my_list().is_nil(space) assert old_process.suspended_context() is current_context assert wrapper.scheduler(space).active_process() is process._w_self diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -103,12 +103,12 @@ def suspend(self, w_current_frame): if self.is_active_process(): - assert self.my_list().is_same_object(self.space.w_nil) + assert self.my_list().is_nil(self.space) w_process = scheduler(self.space).pop_highest_priority_process() self.store_suspended_context(w_current_frame) return ProcessWrapper(self.space, w_process).activate() else: - if self.my_list() is not self.space.w_nil: + if not self.my_list().is_nil(self.space): process_list = ProcessListWrapper(self.space, self.my_list()) process_list.remove(self._w_self) self.store_my_list(self.space.w_nil) @@ -119,7 +119,7 @@ last_link, 
store_last_link = make_getter_setter(1) def is_empty_list(self): - return self.first_link().is_same_object(self.space.w_nil) + return self.first_link().is_nil(self.space) def add_last_link(self, w_object): if self.is_empty_list(): @@ -147,12 +147,12 @@ else: current = LinkWrapper(self.space, self.first_link()) w_next = current.next_link() - while not w_next.is_same_object(self.space.w_nil): + while not w_next.is_nil(self.space): if w_next.is_same_object(w_link): LinkWrapper(self.space, w_link).store_next_link(self.space.w_nil) w_tail = LinkWrapper(self.space, w_next).next_link() current.store_next_link(w_tail) - if w_tail.is_same_object(self.space.w_nil): + if w_tail.is_nil(self.space): self.store_last_link(current._w_self) return current = LinkWrapper(self.space, w_next) From noreply at buildbot.pypy.org Fri Mar 28 17:49:53 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 17:49:53 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Removed redundant space parameter from two methods. Message-ID: <20140328164953.16E381D2575@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r717:a0f1836c6654 Date: 2014-03-28 17:46 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a0f1836c6654/ Log: Removed redundant space parameter from two methods. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -373,7 +373,7 @@ except primitives.PrimitiveFailedError: pass # ignore this error and fall back to the Smalltalk version arguments = self.pop_and_return_n(argcount) - s_frame = s_method.create_frame(self.space, receiver, arguments, self) + s_frame = s_method.create_frame(receiver, arguments, self) self.pop() # receiver # ###################################################################### @@ -398,7 +398,7 @@ assert isinstance(s_class, ClassShadow) print "Missing doesDoesNotUnderstand in hierarchy of %s" % s_class.getname() raise - s_frame = s_method.create_frame(self.space, receiver, [w_message], self) + s_frame = s_method.create_frame(receiver, [w_message], self) self.pop() # ###################################################################### diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1380,7 +1380,7 @@ return s_frame._call_primitive(code, interp, argcount, s_method, w_selector) except PrimitiveFailedError: pass # ignore this error and fall back to the Smalltalk version - s_new_frame = s_method.create_frame(interp.space, w_rcvr, args_w, s_frame) + s_new_frame = s_method.create_frame(w_rcvr, args_w, s_frame) s_frame.pop() return interp.stack_frame(s_new_frame) @@ -1393,7 +1393,7 @@ code = s_method.primitive() if code: raise PrimitiveFailedError("withArgs:executeMethod: not support with primitive method") - s_new_frame = s_method.create_frame(interp.space, w_rcvr, args_w, s_frame) + s_new_frame = s_method.create_frame(w_rcvr, args_w, s_frame) return interp.stack_frame(s_new_frame) @expose_primitive(SIGNAL, unwrap_spec=[object], clean_stack=False, no_result=True) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -196,10 +196,10 @@ def __init__(self, space, w_self, size): AbstractShadow.__init__(self, space, w_self) - self.initialize_storage(space, size) + self.initialize_storage(size) - def initialize_storage(self, space, size): - self.storage = [space.w_nil] * size + def initialize_storage(self, size): + self.storage = [self.space.w_nil] * size def fetch(self, n0): 
return self.storage[n0] def store(self, n0, w_value): @@ -208,7 +208,7 @@ return len(self.storage) def copy_from(self, other_shadow): if self.size() != other_shadow.size(): - self.initialize_storage(other_shadow.space, other_shadow.size()) + self.initialize_storage(other_shadow.size()) AbstractShadow.copy_from(self, other_shadow) class WeakListStorageShadow(AbstractShadow): @@ -1246,9 +1246,9 @@ def primitive(self): return self._primitive - def create_frame(self, space, receiver, arguments, sender = None): + def create_frame(self, receiver, arguments, sender = None): assert len(arguments) == self.argsize - return MethodContextShadow(space, None, self, receiver, arguments, sender) + return MethodContextShadow(self.space, None, self, receiver, arguments, sender) @constant_for_version def getbytecode(self, pc): diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -113,7 +113,7 @@ w_method.setliterals([model.W_PointersObject(space, None, 2)]) if receiver is None: receiver = space.w_nil - s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, receiver, [space.w("foo"), space.w("bar")]) + s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(receiver, [space.w("foo"), space.w("bar")]) return s_frame.w_self(), s_frame def new_frame(bytes, receiver=None): @@ -127,7 +127,7 @@ w_method.islarge = 1 w_method.argsize=2 w_method.tempsize=8 - s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, w("receiver"), [w("foo"), w("bar")]) + s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(w("receiver"), [w("foo"), w("bar")]) w_frame = s_frame.w_self() assert s_frame.w_receiver().as_string() == "receiver" assert s_frame.gettemp(0).as_string() == "foo" @@ -1003,7 +1003,7 @@ w_method.setliterals([space.wrap_int(11)]) #create a frame for that method - w_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, space.wrap_int(0), []).w_self() + w_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space.wrap_int(0), []).w_self() try: interp.loop(w_frame) except interpreter.ReturnFromTopLevel, e: @@ -1013,7 +1013,7 @@ try: interp = interpreter.Interpreter(space, None, "", max_stack_depth=10) interp._loop = True - interp.c_loop(w_method.as_compiledmethod_get_shadow(space).create_frame(space, space.wrap_int(0), [])) + interp.c_loop(w_method.as_compiledmethod_get_shadow(space).create_frame(space.wrap_int(0), [])) except interpreter.StackOverflow, e: assert isinstance(e.s_context, shadow.MethodContextShadow) except interpreter.ReturnFromTopLevel, e: @@ -1049,7 +1049,7 @@ w_method.setliterals([space.wrap_int(11)]) #create a frame for that method - w_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space, space.wrap_int(0), []).w_self() + w_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space.wrap_int(0), []).w_self() try: interp.loop(w_frame) except interpreter.ReturnFromTopLevel, e: diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -42,8 +42,8 @@ w('ensure'), space.w_BlockClosure]) # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method_shadow(chr(0x7c)).create_frame(space, w(0), []) - w_frame = s_method.create_frame(space, w(0), [], 
sender=s_initial_frame).w_self() + s_initial_frame = create_method_shadow(chr(0x7c)).create_frame(w(0), []) + w_frame = s_method.create_frame(w(0), [], sender=s_initial_frame).w_self() try: interp.loop(w_frame) diff --git a/targettinybenchsmalltalk.py b/targettinybenchsmalltalk.py --- a/targettinybenchsmalltalk.py +++ b/targettinybenchsmalltalk.py @@ -25,7 +25,7 @@ w_object = model.W_SmallInteger(0) s_class = w_object.class_shadow(space) s_method = s_class.lookup(w_selector) - s_frame = s_method.create_frame(space, w_object, []) + s_frame = s_method.create_frame(w_object, []) return interp, s_frame interp, s_frame = setup() From noreply at buildbot.pypy.org Fri Mar 28 18:54:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Mar 2014 18:54:17 +0100 (CET) Subject: [pypy-commit] stmgc timelog: Trying to implement recording the aborts and other timing information Message-ID: <20140328175417.269941D2575@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: timelog Changeset: r1114:c19b4b51a580 Date: 2014-03-28 18:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/c19b4b51a580/ Log: Trying to implement recording the aborts and other timing information From noreply at buildbot.pypy.org Fri Mar 28 18:54:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Mar 2014 18:54:18 +0100 (CET) Subject: [pypy-commit] stmgc timelog: design document, for once Message-ID: <20140328175418.4602C1D2575@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: timelog Changeset: r1115:e6634d1cf9d2 Date: 2014-03-28 18:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/e6634d1cf9d2/ Log: design document, for once diff --git a/c7/timelog.txt b/c7/timelog.txt new file mode 100644 --- /dev/null +++ b/c7/timelog.txt @@ -0,0 +1,83 @@ + +Reports +======= + +- self-abort: + WRITE_WRITE_CONTENTION, INEVITABLE_CONTENTION: + traceback in both threads, time lost by this thread + WRITE_READ_CONTENTION: + traceback pointing back to the write, time lost by this thread + +- aborted by a different thread: + WRITE_WRITE_CONTENTION: + traceback in both threads, time lost by this thread + WRITE_READ_CONTENTION: + remote traceback pointing back to the write, time lost by this thread + (no local traceback available to know where we've read the object from) + INEVITABLE_CONTENTION: + n/a + +- self-pausing: + same as self-abort, but reporting the time lost by pausing + +- waiting for a free segment: + - if we're waiting because of inevitability, report with a + traceback and the time lost + - if we're just waiting because of no free segment, don't report it, + or maybe with only the total time lost and no traceback + +- more internal reasons for cond_wait(), like synchronizing the threads, + should all be resolved quickly and are unlikely worth a report + + +Internal Measurements +===================== + +- use clock_gettime(CLOCK_MONOTONIC), it seems to be the fastest way + (less than 5 times slower than a RDTSC instruction, which is itself + not safe in the presence of threads migrating among CPUs) + +- record a fixed number of entries, as a fixed-size heapq list, with + higher recorded times sorted first; the entry with the lowest amount + of time is dropped. 
+ +- if there are several aborts from the same transaction start, then + regroup them by traceback, and report only once with the number + of consecutive occurrences and the total time; do that before inserting + in the heapq list, as otherwise if we have a lot of quick aborts they + would all be lost as not contributing significant time individually + + +API of stmgc.h +============== + +- timelogs are always thread-local. We have APIs to create, clear and + destroy them; recorded entries go to all active timelogs of this thread. + +- the traceback reports are based on the user of the library pushing and + popping stack entries to the current stack in every thread. + +- we have APIs to enumerate a timelog's current entries, and enumerate + each traceback's recorded frames. + + +Tracebacks +========== + +Tracebacks are implemented as read-only objects in a linked list, each +one for one frame. Each such object has a reference count, so that we can +internally record the current stack by taking a reference to the current +top-of-stack, and keep it entirely alive by increasing just this object's +reference count. Some simple freelist should make this efficient for +the common case of objects freed shortly after being allocated. + +We record one traceback pointer for every old object written during this +transaction. It could be avoided only if we are running with no timelog +at all (not just none in this thread), but it's probably not worth the +optimization. + +This is all thread-local, with the exception of when we record another +thread's traceback. To implement this, we clone the complete traceback +into the other thread's local allocator. It should be fine because it +is only needed when we have already determined that this entry has an +important enough recorded time to be worth storing. From noreply at buildbot.pypy.org Fri Mar 28 20:38:28 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 28 Mar 2014 20:38:28 +0100 (CET) Subject: [pypy-commit] pypy default: fix broken link (thanks gregor) Message-ID: <20140328193828.160141D2575@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r70319:c77678f05b18 Date: 2014-03-28 12:37 -0700 http://bitbucket.org/pypy/pypy/changeset/c77678f05b18/ Log: fix broken link (thanks gregor) diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -14,7 +14,7 @@ The present document describes the specific garbage collectors that we wrote in our framework. -.. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. 
_`EU-report on this topic`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf Garbage collectors currently written for the GC framework From noreply at buildbot.pypy.org Fri Mar 28 20:39:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 28 Mar 2014 20:39:18 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix for 32bit platforms again Message-ID: <20140328193918.42DFC1D2575@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70320:944bcc7beb70 Date: 2014-03-26 10:51 -0700 http://bitbucket.org/pypy/pypy/changeset/944bcc7beb70/ Log: fix for 32bit platforms again diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -208,7 +208,7 @@ from pypy.interpreter import gateway from pypy.objspace.std.smalllongobject import W_SmallLongObject def w__small(space, w_obj): - return W_SmallLongObject.fromint(space.int_w(w_obj)) + return W_SmallLongObject.frombigint(space.bigint_w(w_obj)) cls.w__small = cls.space.wrap(gateway.interp2app(w__small)) def test_smalllong(self): From noreply at buildbot.pypy.org Fri Mar 28 20:39:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 28 Mar 2014 20:39:19 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140328193919.A6AB31D2575@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r70321:170a3218cb26 Date: 2014-03-27 16:17 -0700 http://bitbucket.org/pypy/pypy/changeset/170a3218cb26/ Log: merge default diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -211,6 +211,9 @@ .. __: `recursion depth limit`_ +We also do not include any of the recent API additions to Stackless +Python, like ``set_atomic()``. Contributions welcome. 
+ Recursion depth limit +++++++++++++++++++++ diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,6 +64,8 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] + # tests are not strictly ansi C compliant, compile as C++ + kwds["compile_extra"].append("/TP") # prevent linking with PythonXX.lib w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % @@ -642,30 +644,30 @@ body = """ static PyObject* foo_pi(PyObject* self, PyObject *args) { - PyObject *true = Py_True; - int refcnt = true->ob_refcnt; + PyObject *true_obj = Py_True; + int refcnt = true_obj->ob_refcnt; int refcnt_after; - Py_INCREF(true); - Py_INCREF(true); - PyBool_Check(true); - refcnt_after = true->ob_refcnt; - Py_DECREF(true); - Py_DECREF(true); + Py_INCREF(true_obj); + Py_INCREF(true_obj); + PyBool_Check(true_obj); + refcnt_after = true_obj->ob_refcnt; + Py_DECREF(true_obj); + Py_DECREF(true_obj); fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt+2 && refcnt < 3); } static PyObject* foo_bar(PyObject* self, PyObject *args) { - PyObject *true = Py_True; + PyObject *true_obj = Py_True; PyObject *tup = NULL; - int refcnt = true->ob_refcnt; + int refcnt = true_obj->ob_refcnt; int refcnt_after; tup = PyTuple_New(1); - Py_INCREF(true); - if (PyTuple_SetItem(tup, 0, true) < 0) + Py_INCREF(true_obj); + if (PyTuple_SetItem(tup, 0, true_obj) < 0) return NULL; - refcnt_after = true->ob_refcnt; + refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); fprintf(stderr, "REFCNT2 %i %i\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt); diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -110,10 +110,10 @@ } EnumObject; static void - enum_dealloc(EnumObject *op) + enum_dealloc(PyObject *op) { - Py_DECREF(op->ob_name); - Py_TYPE(op)->tp_free((PyObject *)op); + Py_DECREF(((EnumObject *)op)->ob_name); + Py_TYPE(op)->tp_free(op); } static PyMemberDef enum_members[] = { diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -161,15 +161,18 @@ return space.index(self.item(space)) def descr_int(self, space): - if isinstance(self, W_UnsignedIntegerBox): - box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) else: - box = self.convert_to(space, W_Int64Box._get_dtype(space)) - return space.int(box.item(space)) + box = self + return space.call_function(space.w_int, box.item(space)) def descr_float(self, space): - box = self.convert_to(space, W_Float64Box._get_dtype(space)) - return space.float(box.item(space)) + if isinstance(self, W_ComplexFloatingBox): + box = self.descr_get_real(space) + else: + box = self + return space.call_function(space.w_float, box.item(space)) def descr_oct(self, space): return space.call_method(space.builtin, 'oct', self.descr_int(space)) @@ -178,8 +181,7 @@ return space.call_method(space.builtin, 'hex', self.descr_int(space)) def descr_nonzero(self, space): - dtype = self.get_dtype(space) - return space.wrap(dtype.itemtype.bool(self)) + return 
space.wrap(self.get_dtype(space).itemtype.bool(self)) def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -48,6 +48,8 @@ order = 'C' else: order = space.str_w(w_order) + if order == 'K': + order = 'C' if order != 'C': # or order != 'F': raise oefmt(space.w_ValueError, "Unknown order: %s", order) @@ -100,7 +102,7 @@ @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): w_a = convert_to_array(space, w_a) - if w_dtype is None: + if space.is_none(w_dtype): dtype = w_a.get_dtype() else: dtype = space.interp_w(descriptor.W_Dtype, diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -334,6 +334,15 @@ b = array(a, dtype=float) assert b == 123.0 + a = array([[123, 456]]) + assert a.flags['C'] + b = array(a, order='K') + assert b.flags['C'] + assert (b == a).all() + b = array(a, order='K', copy=True) + assert b.flags['C'] + assert (b == a).all() + def test_dtype_attribute(self): import numpy as np a = np.array(40000, dtype='uint16') @@ -404,6 +413,8 @@ assert b.shape == a.shape assert b.dtype == a.dtype assert b[0,0] != 1 + b = np.empty_like(np.array(True), dtype=None) + assert b.dtype is np.dtype(bool) b = np.empty_like(a, dtype='i4') assert b.shape == a.shape assert b.dtype == np.dtype('i4') diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -36,6 +36,24 @@ exc = raises(ValueError, "int(np.str_('abc'))") assert str(exc.value).startswith('invalid literal for int()') assert int(np.uint64((2<<63) - 1)) == (2<<63) - 1 + exc = raises(ValueError, "int(np.float64(np.nan))") + assert str(exc.value) == "cannot convert float NaN to integer" + exc = raises(OverflowError, "int(np.float64(np.inf))") + assert str(exc.value) == "cannot convert float infinity to integer" + assert int(np.float64(1e100)) == int(1e100) + assert long(np.float64(1e100)) == int(1e100) + assert int(np.complex128(1e100+2j)) == int(1e100) + exc = raises(OverflowError, "int(np.complex64(1e100+2j))") + assert str(exc.value) == "cannot convert float infinity to integer" + assert int(np.str_('100000000000000000000')) == 100000000000000000000 + assert long(np.str_('100000000000000000000')) == 100000000000000000000 + + assert float(np.float64(1e100)) == 1e100 + assert float(np.complex128(1e100+2j)) == 1e100 + assert float(np.str_('1e100')) == 1e100 + assert float(np.str_('inf')) == np.inf + assert str(float(np.float64(np.nan))) == 'nan' + assert oct(np.int32(11)) == '0o13' assert oct(np.float32(11.6)) == '0o13' assert oct(np.complex64(11-12j)) == '0o13' From noreply at buildbot.pypy.org Fri Mar 28 22:24:36 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 22:24:36 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Fixed the jittest traces in test_basic.py. Message-ID: <20140328212436.7EA721D29DC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r719:fbc7c14eb978 Date: 2014-03-28 20:25 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/fbc7c14eb978/ Log: Fixed the jittest traces in test_basic.py. 
The bitblt fillWhite trace is about 1/3 shorter than before the refactoring!!! A ton of memory accesses missing. diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -33,7 +33,7 @@ guard_class(p0, ConstClass(MethodContextShadow), descr=), p31 = getfield_gc(p0, descr=), guard_value(p31, ConstPtr(ptr32), descr=), - i33 = getfield_gc(p0, descr=), + i33 = getfield_gc_pure(p0, descr=), guard_not_invalidated(descr=), i35 = int_le(i16, 1000000000), guard_true(i35, descr=), @@ -126,350 +126,241 @@ 1 to: 10000 do: [:i | Display fillWhite]. """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=) - p540 = getarrayitem_gc(p152, 30, descr=) - guard_nonnull_class(p540, 19336136, descr=) - i541 = getfield_gc_pure(p540, descr=) - i543 = int_le(2, i541) - guard_false(i543, descr=) - p544 = getarrayitem_gc(p152, 34, descr=) - p545 = getarrayitem_gc(p152, 36, descr=) - guard_nonnull_class(p544, 19336136, descr=) - i546 = getfield_gc_pure(p544, descr=) - guard_nonnull_class(p545, 19336136, descr=) - i547 = getfield_gc_pure(p545, descr=) - i548 = int_add_ovf(i546, i547) - guard_no_overflow(descr=) - p549 = getarrayitem_gc(p152, 35, descr=) - p550 = getarrayitem_gc(p152, 37, descr=) - guard_nonnull_class(p549, 19336136, descr=) - i551 = getfield_gc_pure(p549, descr=) - guard_nonnull_class(p550, 19336136, descr=) - i552 = getfield_gc_pure(p550, descr=) - i553 = int_add_ovf(i551, i552) - guard_no_overflow(descr=) - i554 = int_add_ovf(i181, 1) - guard_no_overflow(descr=) - i555 = int_sub(i527, 2) - setfield_gc(ConstPtr(ptr182), i555, descr=) - i556 = int_le(i555, 0) - guard_false(i556, descr=) - p557 = getarrayitem_gc(p152, 16, descr=) - guard_nonnull_class(p557, 19336136, descr=) - i558 = getfield_gc_pure(p557, descr=) - i559 = int_le(i554, i558) - guard_true(i559, descr=) - p560 = getarrayitem_gc(p152, 2, descr=) - guard_class(p560, 19336008, descr=) - p561 = getfield_gc(p560, descr=) - guard_value(p561, ConstPtr(ptr198), descr=) - p562 = getarrayitem_gc(p152, 25, descr=) - p563 = getarrayitem_gc(p152, 20, descr=) - guard_class(p562, 19352312, descr=) - p564 = getfield_gc(p562, descr=) - guard_value(p564, ConstPtr(ptr206), descr=) - i565 = getfield_gc_pure(p562, descr=) - guard_nonnull_class(p563, 19336136, descr=) - i566 = getfield_gc_pure(p563, descr=) - i567 = int_is_zero(i565) - guard_false(i567, descr=) - i568 = int_mod(i566, i565) - i569 = int_lt(i565, 0) - guard_false(i569, descr=) - i570 = int_rshift(i568, 31) - i571 = int_and(i565, i570) - i572 = int_add(i568, i571) - i573 = int_add_ovf(1, i572) - guard_no_overflow(descr=) - i574 = int_ge(i572, 0) - guard_true(i574, descr=) - i575 = int_lt(i572, i565) - guard_true(i575, descr=) - p576 = getfield_gc(p562, descr=) - guard_nonnull(p576, descr=) - i577 = getarrayitem_gc(p576, i572, descr=) - i578 = uint_lt(i577, 0) - guard_false(i578, descr=) - i579 = uint_lt(i577, 2147483647) - guard_true(i579, descr=) - p580 = getarrayitem_gc(p152, 32, descr=) - guard_nonnull_class(p580, 19336136, descr=) - i581 = getfield_gc_pure(p580, descr=) - i582 = int_add_ovf(i566, i581) - guard_no_overflow(descr=) - p583 = getarrayitem_gc(p152, 33, descr=) - i584 = instance_ptr_eq(p583, ConstPtr(ptr236)) - guard_true(i584, descr=) - p585 = getarrayitem_gc(p152, 27, descr=) - i586 = int_le(1, i541) - guard_true(i586, descr=) - p587 = getarrayitem_gc(p152, 1, descr=) - guard_class(p587, 19336008, descr=) - p588 = getfield_gc(p587, descr=) - 
guard_value(p588, ConstPtr(ptr246), descr=) - i589 = int_ge(i577, 0) - guard_true(i589, descr=) - i590 = int_and(i577, i577) - i591 = uint_lt(i590, 2147483647) - guard_true(i591, descr=) - p592 = getarrayitem_gc(p152, 21, descr=) - i593 = int_add_ovf(i553, 1) - guard_no_overflow(descr=) - guard_class(p592, 19375152, descr=) - p594 = getfield_gc(p592, descr=) - guard_value(p594, ConstPtr(ptr258), descr=) - i595 = int_ge(i553, 0) - guard_true(i595, descr=) - i596 = getfield_gc_pure(p592, descr=) - i597 = int_lt(i553, i596) - guard_true(i597, descr=) - i598 = getfield_gc(p592, descr=) - i599 = getarrayitem_raw(i598, i553, descr=) - i600 = uint_lt(i599, 0) - guard_false(i600, descr=) - i601 = uint_lt(i599, 2147483647) - guard_true(i601, descr=) - p602 = getarrayitem_gc(p152, 3, descr=) - setfield_gc(ConstPtr(ptr182), i276, descr=) - guard_nonnull_class(p602, 19336136, descr=) - i603 = getfield_gc_pure(p602, descr=) - i604 = int_lt(i603, 16) - guard_true(i604, descr=) - i605 = int_eq(i603, 0) - guard_false(i605, descr=) - i606 = int_eq(i603, 1) - guard_false(i606, descr=) - i607 = int_eq(i603, 2) - guard_false(i607, descr=) - i608 = int_eq(i603, 3) - guard_true(i608, descr=) - setfield_gc(ConstPtr(ptr182), i272, descr=) - guard_nonnull_class(p585, 19351904, descr=) - i609 = getfield_gc(p585, descr=) - i610 = int_and(i609, i590) - i611 = uint_lt(i610, 2147483647) - guard_true(i611, descr=) - i612 = int_xor(i609, i297) - setfield_gc(ConstPtr(ptr182), i276, descr=) - i613 = uint_lt(i612, 2147483647) - guard_true(i613, descr=) - i614 = getarrayitem_raw(i598, i553, descr=) - setfield_gc(ConstPtr(ptr182), i272, descr=) - i615 = uint_lt(i614, 0) - guard_false(i615, descr=) - i616 = uint_lt(i614, 2147483647) - guard_true(i616, descr=) - i617 = int_ge(i612, 0) - guard_true(i617, descr=) - i618 = int_ge(i614, 0) - guard_true(i618, descr=) - i619 = int_and(i612, i614) - i620 = uint_lt(i619, 2147483647) - guard_true(i620, descr=) - i621 = int_ge(i610, 0) - guard_true(i621, descr=) - i622 = int_or(i610, i619) - i623 = uint_lt(i622, 2147483647) - guard_true(i623, descr=) - setarrayitem_raw(i598, i553, i622, descr=) - i624 = getfield_gc_pure(p592, descr=) - i625 = int_floordiv(8, i624) - i626 = int_mul(i625, i624) - i627 = int_lt(i624, 0) - guard_false(i627, descr=) - i628 = int_sub(8, i626) - i629 = int_rshift(i628, 31) - i630 = int_add(i625, i629) - i631 = int_mul(i553, i630) - i632 = int_sub(32, i624) - i633 = int_ge(0, i630) - guard_false(i633, descr=) - i634 = int_ge(i631, i596) - guard_false(i634, descr=) - i635 = uint_rshift(i622, i632) - i636 = int_lshift(i622, i624) - i637 = uint_rshift(i636, i632) - i638 = int_lshift(i637, 8) - i639 = int_or(i635, i638) - i640 = int_lshift(i636, i624) - i641 = uint_rshift(i640, i632) - i642 = int_lshift(i641, 16) - i643 = int_or(i639, i642) - i644 = int_lshift(i640, i624) - i645 = uint_rshift(i644, i632) - i646 = int_lshift(i645, 24) - i647 = int_or(i643, i646) - i648 = int_lshift(i644, i624) - p649 = getfield_gc_pure(p592, descr=) - i650 = getfield_gc(p649, descr=) - guard_value(i650, 59506760, descr=) - setarrayitem_raw(59506760, i631, i647, descr=) - i651 = int_add(i631, 1) - i652 = int_ge(1, i630) - guard_false(i652, descr=) - i653 = int_ge(i651, i596) - guard_false(i653, descr=) - i654 = uint_rshift(i648, i632) - i655 = int_lshift(i648, i624) - i656 = uint_rshift(i655, i632) - i657 = int_lshift(i656, 8) - i658 = int_or(i654, i657) - i659 = int_lshift(i655, i624) - i660 = uint_rshift(i659, i632) - i661 = int_lshift(i660, 16) - i662 = int_or(i658, i661) - i663 = 
int_lshift(i659, i624) - i664 = uint_rshift(i663, i632) - i665 = int_lshift(i664, 24) - i666 = int_or(i662, i665) - i667 = int_lshift(i663, i624) - setarrayitem_raw(59506760, i651, i666, descr=) - i668 = int_add(i651, 1) - i669 = int_ge(2, i630) - guard_false(i669, descr=) - i670 = int_ge(i668, i596) - guard_false(i670, descr=) - i671 = uint_rshift(i667, i632) - i672 = int_lshift(i667, i624) - i673 = uint_rshift(i672, i632) - i674 = int_lshift(i673, 8) - i675 = int_or(i671, i674) - i676 = int_lshift(i672, i624) - i677 = uint_rshift(i676, i632) - i678 = int_lshift(i677, 16) - i679 = int_or(i675, i678) - i680 = int_lshift(i676, i624) - i681 = uint_rshift(i680, i632) - i682 = int_lshift(i681, 24) - i683 = int_or(i679, i682) - i684 = int_lshift(i680, i624) - setarrayitem_raw(59506760, i668, i683, descr=) - i685 = int_add(i668, 1) - i686 = int_ge(3, i630) - guard_false(i686, descr=) - i687 = int_ge(i685, i596) - guard_false(i687, descr=) - i688 = uint_rshift(i684, i632) - i689 = int_lshift(i684, i624) - i690 = uint_rshift(i689, i632) - i691 = int_lshift(i690, 8) - i692 = int_or(i688, i691) - i693 = int_lshift(i689, i624) - i694 = uint_rshift(i693, i632) - i695 = int_lshift(i694, 16) - i696 = int_or(i692, i695) - i697 = int_lshift(i693, i624) - i698 = uint_rshift(i697, i632) - i699 = int_lshift(i698, 24) - i700 = int_or(i696, i699) - i701 = int_lshift(i697, i624) - setarrayitem_raw(59506760, i685, i700, descr=) - i702 = int_add(i685, 1) - i703 = int_ge(4, i630) - guard_false(i703, descr=) - i704 = int_ge(i702, i596) - guard_false(i704, descr=) - i705 = uint_rshift(i701, i632) - i706 = int_lshift(i701, i624) - i707 = uint_rshift(i706, i632) - i708 = int_lshift(i707, 8) - i709 = int_or(i705, i708) - i710 = int_lshift(i706, i624) - i711 = uint_rshift(i710, i632) - i712 = int_lshift(i711, 16) - i713 = int_or(i709, i712) - i714 = int_lshift(i710, i624) - i715 = uint_rshift(i714, i632) - i716 = int_lshift(i715, 24) - i717 = int_or(i713, i716) - i718 = int_lshift(i714, i624) - setarrayitem_raw(59506760, i702, i717, descr=) - i719 = int_add(i702, 1) - i720 = int_ge(5, i630) - guard_false(i720, descr=) - i721 = int_ge(i719, i596) - guard_false(i721, descr=) - i722 = uint_rshift(i718, i632) - i723 = int_lshift(i718, i624) - i724 = uint_rshift(i723, i632) - i725 = int_lshift(i724, 8) - i726 = int_or(i722, i725) - i727 = int_lshift(i723, i624) - i728 = uint_rshift(i727, i632) - i729 = int_lshift(i728, 16) - i730 = int_or(i726, i729) - i731 = int_lshift(i727, i624) - i732 = uint_rshift(i731, i632) - i733 = int_lshift(i732, 24) - i734 = int_or(i730, i733) - i735 = int_lshift(i731, i624) - setarrayitem_raw(59506760, i719, i734, descr=) - i736 = int_add(i719, 1) - i737 = int_ge(6, i630) - guard_false(i737, descr=) - i738 = int_ge(i736, i596) - guard_false(i738, descr=) - i739 = uint_rshift(i735, i632) - i740 = int_lshift(i735, i624) - i741 = uint_rshift(i740, i632) - i742 = int_lshift(i741, 8) - i743 = int_or(i739, i742) - i744 = int_lshift(i740, i624) - i745 = uint_rshift(i744, i632) - i746 = int_lshift(i745, 16) - i747 = int_or(i743, i746) - i748 = int_lshift(i744, i624) - i749 = uint_rshift(i748, i632) - i750 = int_lshift(i749, 24) - i751 = int_or(i747, i750) - i752 = int_lshift(i748, i624) - setarrayitem_raw(59506760, i736, i751, descr=) - i753 = int_add(i736, 1) - i754 = int_ge(7, i630) - guard_false(i754, descr=) - i755 = int_ge(i753, i596) - guard_false(i755, descr=) - i756 = uint_rshift(i752, i632) - i757 = int_lshift(i752, i624) - i758 = uint_rshift(i757, i632) - i759 = int_lshift(i758, 8) - i760 = 
int_or(i756, i759) - i761 = int_lshift(i757, i624) - i762 = uint_rshift(i761, i632) - i763 = int_lshift(i762, 16) - i764 = int_or(i760, i763) - i765 = int_lshift(i761, i624) - i766 = uint_rshift(i765, i632) - i767 = int_lshift(i766, 24) - i768 = int_or(i764, i767) - i769 = int_lshift(i765, i624) - setarrayitem_raw(59506760, i753, i768, descr=) - i770 = int_add(i753, 1) - i771 = int_ge(8, i630) - guard_true(i771, descr=) - p772 = getarrayitem_gc(p152, 31, descr=) - guard_nonnull_class(p772, 19336136, descr=) - i773 = getfield_gc_pure(p772, descr=) - i774 = int_add_ovf(i548, i773) - guard_no_overflow(descr=) - i775 = int_add_ovf(i553, i773) - guard_no_overflow(descr=) - p776 = getarrayitem_gc(ConstPtr(ptr523), 1, descr=) - i777 = int_sub(i555, 24) - setfield_gc(ConstPtr(ptr182), i777, descr=) - i778 = int_le(i777, 0) - guard_false(i778, descr=) - p779 = new_with_vtable(19336136) - setfield_gc(p779, i774, descr=) - setarrayitem_gc(p152, 34, p779, descr=) - p780 = new_with_vtable(19336136) - setfield_gc(p780, i775, descr=) - setarrayitem_gc(p152, 35, p780, descr=) - p781 = new_with_vtable(19336136) - setfield_gc(p781, i582, descr=) - setarrayitem_gc(p152, 20, p781, descr=) - i782 = arraylen_gc(p152, descr=) - jump(p0, p3, p8, i577, p776, i590, p18, i554, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, i139, p152, i777, p141, i276, i272, i297, descr=TargetToken(61930080)) + guard_not_invalidated(descr=), + i584 = int_le(2, i151), + guard_false(i584, descr=), + i585 = getfield_gc_pure(p576, descr=), + i586 = int_add_ovf(i585, i160), + guard_no_overflow(descr=), + i587 = getfield_gc_pure(p579, descr=), + i588 = int_add_ovf(i587, i169), + guard_no_overflow(descr=), + i589 = int_add_ovf(i174, 1), + guard_no_overflow(descr=), + i590 = int_sub(i572, 2), + setfield_gc(ConstPtr(ptr175), i590, descr=), + i591 = int_le(i590, 0), + guard_false(i591, descr=), + i592 = int_le(i589, i185), + guard_true(i592, descr=), + i593 = getfield_gc_pure(p350, descr=), + i594 = int_mod(i593, i219), + i595 = int_rshift(i594, 31), + i596 = int_and(i219, i595), + i597 = int_add(i594, i596), + i598 = int_add_ovf(1, i597), + guard_no_overflow(descr=), + i599 = int_ge(i597, 0), + guard_true(i599, descr=), + i600 = int_lt(i597, i219), + guard_true(i600, descr=), + i601 = getarrayitem_gc(p241, i597, descr=), + i602 = uint_lt(i601, 0), + guard_false(i602, descr=), + i603 = uint_lt(i601, 2147483647), + guard_true(i603, descr=), + i604 = int_add_ovf(i593, i250), + guard_no_overflow(descr=), + i605 = int_ge(i601, 0), + guard_true(i605, descr=), + i606 = int_and(i601, i601), + i607 = uint_lt(i606, 2147483647), + guard_true(i607, descr=), + i608 = int_add_ovf(i588, 1), + guard_no_overflow(descr=), + i609 = int_ge(i588, 0), + guard_true(i609, descr=), + i610 = int_lt(i588, i281), + guard_true(i610, descr=), + i611 = getarrayitem_raw(i283, i588, descr=), + i612 = uint_lt(i611, 0), + guard_false(i612, descr=), + i613 = uint_lt(i611, 2147483647), + guard_true(i613, descr=), + i614 = int_and(i318, i606), + i615 = uint_lt(i614, 2147483647), + guard_true(i615, descr=), + i616 = getarrayitem_raw(i283, i588, descr=), + i617 = uint_lt(i616, 0), + guard_false(i617, descr=), + i618 = uint_lt(i616, 2147483647), + guard_true(i618, descr=), + i619 = int_ge(i616, 0), + guard_true(i619, descr=), + i620 = int_and(i329, i616), + i621 = 
uint_lt(i620, 2147483647), + guard_true(i621, descr=), + i622 = int_ge(i614, 0), + guard_true(i622, descr=), + i623 = int_or(i614, i620), + i624 = uint_lt(i623, 2147483647), + guard_true(i624, descr=), + setarrayitem_raw(i283, i588, i623, descr=), + i626 = int_lshift(i588, 3), + i627 = int_ge(i626, i281), + guard_false(i627, descr=), + i628 = uint_rshift(i623, i373), + i629 = int_lshift(i623, i360), + i630 = uint_rshift(i629, i373), + i631 = int_lshift(i630, 8), + i632 = int_or(i628, i631), + i633 = int_lshift(i629, i360), + i634 = uint_rshift(i633, i373), + i635 = int_lshift(i634, 16), + i636 = int_or(i632, i635), + i637 = int_lshift(i633, i360), + i638 = uint_rshift(i637, i373), + i639 = int_lshift(i638, 24), + i640 = int_or(i636, i639), + i641 = int_lshift(i637, i360), + setarrayitem_raw(51118152, i626, i640, descr=), + i642 = int_add(i626, 1), + i643 = int_ge(i642, i281), + guard_false(i643, descr=), + i644 = uint_rshift(i641, i373), + i645 = int_lshift(i641, i360), + i646 = uint_rshift(i645, i373), + i647 = int_lshift(i646, 8), + i648 = int_or(i644, i647), + i649 = int_lshift(i645, i360), + i650 = uint_rshift(i649, i373), + i651 = int_lshift(i650, 16), + i652 = int_or(i648, i651), + i653 = int_lshift(i649, i360), + i654 = uint_rshift(i653, i373), + i655 = int_lshift(i654, 24), + i656 = int_or(i652, i655), + i657 = int_lshift(i653, i360), + setarrayitem_raw(51118152, i642, i656, descr=), + i658 = int_add(i642, 1), + i659 = int_ge(i658, i281), + guard_false(i659, descr=), + i660 = uint_rshift(i657, i373), + i661 = int_lshift(i657, i360), + i662 = uint_rshift(i661, i373), + i663 = int_lshift(i662, 8), + i664 = int_or(i660, i663), + i665 = int_lshift(i661, i360), + i666 = uint_rshift(i665, i373), + i667 = int_lshift(i666, 16), + i668 = int_or(i664, i667), + i669 = int_lshift(i665, i360), + i670 = uint_rshift(i669, i373), + i671 = int_lshift(i670, 24), + i672 = int_or(i668, i671), + i673 = int_lshift(i669, i360), + setarrayitem_raw(51118152, i658, i672, descr=), + i674 = int_add(i658, 1), + i675 = int_ge(i674, i281), + guard_false(i675, descr=), + i676 = uint_rshift(i673, i373), + i677 = int_lshift(i673, i360), + i678 = uint_rshift(i677, i373), + i679 = int_lshift(i678, 8), + i680 = int_or(i676, i679), + i681 = int_lshift(i677, i360), + i682 = uint_rshift(i681, i373), + i683 = int_lshift(i682, 16), + i684 = int_or(i680, i683), + i685 = int_lshift(i681, i360), + i686 = uint_rshift(i685, i373), + i687 = int_lshift(i686, 24), + i688 = int_or(i684, i687), + i689 = int_lshift(i685, i360), + setarrayitem_raw(51118152, i674, i688, descr=), + i690 = int_add(i674, 1), + i691 = int_ge(i690, i281), + guard_false(i691, descr=), + i692 = uint_rshift(i689, i373), + i693 = int_lshift(i689, i360), + i694 = uint_rshift(i693, i373), + i695 = int_lshift(i694, 8), + i696 = int_or(i692, i695), + i697 = int_lshift(i693, i360), + i698 = uint_rshift(i697, i373), + i699 = int_lshift(i698, 16), + i700 = int_or(i696, i699), + i701 = int_lshift(i697, i360), + i702 = uint_rshift(i701, i373), + i703 = int_lshift(i702, 24), + i704 = int_or(i700, i703), + i705 = int_lshift(i701, i360), + setarrayitem_raw(51118152, i690, i704, descr=), + i706 = int_add(i690, 1), + i707 = int_ge(i706, i281), + guard_false(i707, descr=), + i708 = uint_rshift(i705, i373), + i709 = int_lshift(i705, i360), + i710 = uint_rshift(i709, i373), + i711 = int_lshift(i710, 8), + i712 = int_or(i708, i711), + i713 = int_lshift(i709, i360), + i714 = uint_rshift(i713, i373), + i715 = int_lshift(i714, 16), + i716 = int_or(i712, i715), + i717 = 
int_lshift(i713, i360), + i718 = uint_rshift(i717, i373), + i719 = int_lshift(i718, 24), + i720 = int_or(i716, i719), + i721 = int_lshift(i717, i360), + setarrayitem_raw(51118152, i706, i720, descr=), + i722 = int_add(i706, 1), + i723 = int_ge(i722, i281), + guard_false(i723, descr=), + i724 = uint_rshift(i721, i373), + i725 = int_lshift(i721, i360), + i726 = uint_rshift(i725, i373), + i727 = int_lshift(i726, 8), + i728 = int_or(i724, i727), + i729 = int_lshift(i725, i360), + i730 = uint_rshift(i729, i373), + i731 = int_lshift(i730, 16), + i732 = int_or(i728, i731), + i733 = int_lshift(i729, i360), + i734 = uint_rshift(i733, i373), + i735 = int_lshift(i734, 24), + i736 = int_or(i732, i735), + i737 = int_lshift(i733, i360), + setarrayitem_raw(51118152, i722, i736, descr=), + i738 = int_add(i722, 1), + i739 = int_ge(i738, i281), + guard_false(i739, descr=), + i740 = uint_rshift(i737, i373), + i741 = int_lshift(i737, i360), + i742 = uint_rshift(i741, i373), + i743 = int_lshift(i742, 8), + i744 = int_or(i740, i743), + i745 = int_lshift(i741, i360), + i746 = uint_rshift(i745, i373), + i747 = int_lshift(i746, 16), + i748 = int_or(i744, i747), + i749 = int_lshift(i745, i360), + i750 = uint_rshift(i749, i373), + i751 = int_lshift(i750, 24), + i752 = int_or(i748, i751), + i753 = int_lshift(i749, i360), + setarrayitem_raw(51118152, i738, i752, descr=), + i754 = int_add(i738, 1), + i755 = int_add_ovf(i586, i558), + guard_no_overflow(descr=), + i756 = int_add_ovf(i588, i558), + guard_no_overflow(descr=), + i757 = int_sub(i590, 23), + setfield_gc(ConstPtr(ptr175), i757, descr=), + i758 = int_le(i757, 0), + guard_false(i758, descr=), + p759 = new_with_vtable(18295080), + setfield_gc(p759, i755, descr=), + setarrayitem_gc(p145, 34, p759, descr=), + p760 = new_with_vtable(18295080), + setfield_gc(p760, i756, descr=), + setarrayitem_gc(p145, 35, p760, descr=), + p761 = new_with_vtable(18295080), + setfield_gc(p761, i604, descr=), + setarrayitem_gc(p145, 20, p761, descr=), + i762 = arraylen_gc(p145, descr=), + i763 = arraylen_gc(p568, descr=), + jump(p0, p3, p8, i601, p582, i606, p18, i589, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, i139, 1, p147, p759, i160, p156, p760, i169, p165, p145, i757, i185, p182, p761, i219, p195, p241, i250, p248, p257, p141, p272, i281, i283, i318, i329, i373, i360, i558, p556, p582, p568, descr=TargetToken(53262992)) """) @py.test.mark.skipif("'just dozens of long traces'") From noreply at buildbot.pypy.org Fri Mar 28 22:24:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 22:24:37 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Fixed the traces in test_strategies.py. Message-ID: <20140328212437.A389B1D29DC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r720:a7ee5ef32eea Date: 2014-03-28 20:43 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a7ee5ef32eea/ Log: Fixed the traces in test_strategies.py. The problem with the collect-trace still exists (a bit shorter though). But check out the indexOf trace, super-short!! diff --git a/spyvm/test/jittest/test_strategies.py b/spyvm/test/jittest/test_strategies.py --- a/spyvm/test/jittest/test_strategies.py +++ b/spyvm/test/jittest/test_strategies.py @@ -12,85 +12,80 @@ (1 to: 10000) asOrderedCollection. 
""") self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - p195 = getarrayitem_gc(p59, 1, descr=), - guard_nonnull_class(p195, 19336136, descr=), - i196 = getfield_gc_pure(p195, descr=), - i197 = int_ge(i196, i189), - guard_true(i197, descr=), - cond_call(i84, 18017728, p76, descr=), - cond_call(i114, 18017728, p102, descr=), - cond_call(i114, 18017728, p102, descr=), - p198 = getarrayitem_gc(p116, 0, descr=), - cond_call(i114, 18017728, p102, descr=), - p200 = new_with_vtable(19336136), - setfield_gc(p200, i189, descr=), - setarrayitem_gc(p116, 1, p200, descr=), - setarrayitem_gc(p88, 0, p198, descr=), - setfield_gc(p76, 2, descr=), - setfield_gc(p76, 15, descr=), - setfield_gc(p76, p0, descr=), - setfield_gc(ConstPtr(ptr90), i97, descr=), - setarrayitem_gc(p88, 1, p200, descr=), - guard_class(p198, 19336008, descr=), - p201 = getfield_gc(p198, descr=), - guard_value(p201, ConstPtr(ptr128), descr=), - p202 = call(ConstClass(elidable_func__star_0), p198, ConstPtr(ptr139), descr=), - setarrayitem_gc(p88, 0, ConstPtr(null), descr=), - setfield_gc(p76, 0, descr=), - setfield_gc(ConstPtr(ptr90), i137, descr=), - setarrayitem_gc(p88, 1, ConstPtr(null), descr=), - guard_isnull(p202, descr=), - p205 = call(ConstClass(elidable_func__star_0), p198, ConstPtr(ptr143), descr=), - guard_class(p205, 19337240, descr=), - p206 = call(ConstClass(elidable_func__star_0), p198, ConstPtr(ptr147), descr=), - p207 = getarrayitem_gc(p206, 2, descr=), - p208 = getarrayitem_gc(p206, 0, descr=), - guard_class(p208, 19336008, descr=), - p209 = getfield_gc(p208, descr=), - guard_value(p209, ConstPtr(ptr155), descr=), - p210 = call(ConstClass(elidable_func__star_0), p208, ConstPtr(ptr157), descr=), - guard_isnull(p210, descr=), - p211 = call(ConstClass(elidable_func__star_0), p208, ConstPtr(ptr160), descr=), - guard_class(p211, 19355992, descr=), - p212 = call(ConstClass(elidable_func__star_0), p208, ConstPtr(ptr164), descr=), - p213 = getfield_gc_pure(p212, descr=), - i214 = arraylen_gc(p213, descr=), - guard_nonnull_class(p207, 19336136, descr=), - i215 = getfield_gc_pure(p207, descr=), - i216 = int_eq(i215, i214), - guard_false(i216, descr=), - i217 = int_add_ovf(i215, 1), - guard_no_overflow(descr=), - i218 = int_ge(i215, 0), - guard_true(i218, descr=), - i219 = int_lt(i215, i214), - guard_true(i219, descr=), - i220 = getfield_gc(p212, descr=), - i221 = int_eq(i215, i220), - guard_true(i221, descr=), - i222 = int_add(i220, 1), - setarrayitem_gc(p213, i215, i189, descr=), - p223 = new_with_vtable(19336136), - setfield_gc(p223, i217, descr=), - setarrayitem_gc(p206, 2, p223, descr=), - p224 = getarrayitem_gc(p59, 2, descr=), - setfield_gc(p76, -1, descr=), - setfield_gc(p76, ConstPtr(null), descr=), - setfield_gc(ConstPtr(ptr90), i93, descr=), - setfield_gc(p212, i222, descr=), - guard_nonnull_class(p224, 19336136, descr=), - i225 = getfield_gc_pure(p224, descr=), - i226 = int_add_ovf(i189, i225), - guard_no_overflow(descr=), - i227 = int_sub(i192, 8), - setfield_gc(ConstPtr(ptr90), i227, descr=), - i228 = int_le(i227, 0), - guard_false(i228, descr=), - i229 = arraylen_gc(p59, descr=), - i230 = arraylen_gc(p88, descr=), - i231 = arraylen_gc(p116, descr=), - jump(p0, p3, p6, i226, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i47, p59, i84, p76, i114, p102, p116, p88, i97, i99, i137, i93, i227, descr=TargetToken(56538256)) + guard_not_invalidated(descr=), + i190 = getarrayitem_gc(p52, 1, descr=), + i191 = int_eq(i190, 2147483647), + guard_false(i191, descr=), + i192 = 
int_ge(i190, i184), + guard_true(i192, descr=), + cond_call(i74, 16971392, p66, descr=), + cond_call(i102, 16971392, p90, descr=), + cond_call(i102, 16971392, p90, descr=), + p193 = getarrayitem_gc(p104, 0, descr=), + cond_call(i102, 16971392, p90, descr=), + p195 = new_with_vtable(18295080), + setfield_gc(p195, i184, descr=), + setarrayitem_gc(p104, 1, p195, descr=), + setarrayitem_gc(p78, 0, p193, descr=), + setfield_gc(p66, 2, descr=), + setfield_gc(p66, 15, descr=), + setfield_gc(p66, p0, descr=), + setfield_gc(ConstPtr(ptr80), i87, descr=), + setarrayitem_gc(p78, 1, p195, descr=), + guard_class(p193, 18294904, descr=), + p196 = getfield_gc(p193, descr=), + p197 = getfield_gc(p196, descr=), + guard_value(p197, ConstPtr(ptr117), descr=), + p198 = getfield_gc(p193, descr=), + setarrayitem_gc(p78, 0, ConstPtr(null), descr=), + setfield_gc(p66, 0, descr=), + setfield_gc(ConstPtr(ptr80), i131, descr=), + setarrayitem_gc(p78, 1, ConstPtr(null), descr=), + guard_class(p198, ConstClass(ListStorageShadow), descr=), + p201 = getfield_gc_pure(p198, descr=), + p202 = getarrayitem_gc(p201, 2, descr=), + p203 = getarrayitem_gc(p201, 0, descr=), + guard_class(p203, 18294904, descr=), + p204 = getfield_gc(p203, descr=), + p205 = getfield_gc(p204, descr=), + guard_value(p205, ConstPtr(ptr149), descr=), + p206 = getfield_gc(p203, descr=), + guard_nonnull_class(p206, 18300088, descr=), + p207 = getfield_gc_pure(p206, descr=), + i208 = arraylen_gc(p207, descr=), + i209 = getfield_gc_pure(p206, descr=), + guard_nonnull_class(p202, 18295080, descr=), + i210 = getfield_gc_pure(p202, descr=), + i211 = int_eq(i210, i208), + guard_false(i211, descr=), + i212 = int_add_ovf(i210, 1), + guard_no_overflow(descr=), + i213 = int_ge(i210, 0), + guard_true(i213, descr=), + i214 = int_lt(i210, i208), + guard_true(i214, descr=), + i215 = int_eq(i184, 2147483647), + guard_false(i215, descr=), + setarrayitem_gc(p207, i210, i184, descr=), + i216 = getarrayitem_gc(p52, 2, descr=), + setfield_gc(p66, -1, descr=), + setfield_gc(p66, ConstPtr(null), descr=), + setfield_gc(ConstPtr(ptr80), i83, descr=), + i217 = int_eq(i216, 2147483647), + guard_false(i217, descr=), + i218 = int_add_ovf(i184, i216), + guard_no_overflow(descr=), + i219 = int_sub(i187, 7), + setfield_gc(ConstPtr(ptr80), i219, descr=), + i220 = int_le(i219, 0), + guard_false(i220, descr=), + p221 = new_with_vtable(18295080), + setfield_gc(p221, i212, descr=), + setarrayitem_gc(p201, 2, p221, descr=), + i222 = arraylen_gc(p52, descr=), + i223 = arraylen_gc(p78, descr=), + i224 = arraylen_gc(p104, descr=), + jump(p0, p3, p6, i218, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i47, p52, i74, p66, i102, p90, p104, p78, i87, i89, i131, i119, p145, i83, i219, descr=TargetToken(53064608)) """) def test_indexOf(self, spy, tmpdir): @@ -100,50 +95,32 @@ """) # First loop: asOrderedCollection, second loop: makeRoomAtLast self.assert_matches(traces[2].loop, """ - guard_not_invalidated(descr=), - i136 = int_le(i130, i62), - guard_true(i136, descr=), - p137 = getarrayitem_gc(p88, 1, descr=), - setfield_gc(ConstPtr(ptr68), i75, descr=), - guard_nonnull_class(p137, 19336136, descr=), - i138 = getfield_gc_pure(p137, descr=), - i139 = int_add_ovf(i130, i138), - guard_no_overflow(descr=), - i140 = int_sub(i139, 1), - p141 = getarrayitem_gc(p88, 2, descr=), - guard_nonnull_class(p141, 19336136, descr=), - i142 = getfield_gc_pure(p141, descr=), - i143 = int_gt(i140, i142), - guard_false(i143, descr=), - p144 = getarrayitem_gc(p88, 0, descr=), - 
guard_class(p144, 19336008, descr=), - p145 = getfield_gc(p144, descr=), - guard_value(p145, ConstPtr(ptr105), descr=), - i146 = int_sub(i140, 1), - i147 = int_ge(i146, 0), - guard_true(i147, descr=), - p148 = call(ConstClass(elidable_func__star_0), p144, ConstPtr(ptr111), descr=), - guard_isnull(p148, descr=), - p149 = call(ConstClass(elidable_func__star_0), p144, ConstPtr(ptr114), descr=), - guard_class(p149, 19374540, descr=), - p150 = call(ConstClass(elidable_func__star_0), p144, ConstPtr(ptr118), descr=), - p151 = getfield_gc_pure(p150, descr=), - i152 = arraylen_gc(p151, descr=), - i153 = int_lt(i146, i152), - guard_true(i153, descr=), - p154 = getfield_gc_pure(p150, descr=), - i155 = getarrayitem_gc(p154, i146, descr=), - guard_false(i155, descr=), - i156 = getarrayitem_gc(p151, i146, descr=), - setfield_gc(ConstPtr(ptr68), i71, descr=), - i157 = int_eq(i156, i127), - guard_false(i157, descr=), - i158 = int_add_ovf(i130, 1), - guard_no_overflow(descr=), - i159 = int_sub(i133, 6), - setfield_gc(ConstPtr(ptr68), i159, descr=), - i160 = int_le(i159, 0), - guard_false(i160, descr=), - i161 = arraylen_gc(p88, descr=), - jump(p0, p3, p6, p8, p10, i158, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, p48, p50, p52, i57, i62, p88, i75, p64, i71, i127, i159, descr=TargetToken(49528736)) + guard_not_invalidated(descr=), + i143 = int_le(i137, i62), + guard_true(i143, descr=), + setfield_gc(ConstPtr(ptr84), i91, descr=), + i144 = int_add_ovf(i137, i100), + guard_no_overflow(descr=), + i145 = int_sub(i144, 1), + i146 = int_gt(i145, i108), + guard_false(i146, descr=), + i147 = int_sub(i145, 1), + i148 = int_ge(i147, 0), + guard_true(i148, descr=), + i149 = int_lt(i147, i127), + guard_true(i149, descr=), + i150 = getarrayitem_gc(p126, i147, descr=), + i151 = int_eq(i150, 2147483647), + guard_false(i151, descr=), + setfield_gc(ConstPtr(ptr84), i87, descr=), + i152 = int_eq(i150, i134), + guard_false(i152, descr=), + i153 = int_add_ovf(i137, 1), + guard_no_overflow(descr=), + i154 = int_sub(i140, 5), + setfield_gc(ConstPtr(ptr84), i154, descr=), + i155 = int_le(i154, 0), + guard_false(i155, descr=), + i156 = arraylen_gc(p96, descr=), + jump(p0, p3, p6, p8, p10, i153, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, p48, p50, p52, i57, i62, i91, i100, p64, p98, i80, i108, p105, p111, i127, p126, i87, i134, i154, p96, descr=TargetToken(51324000)) """) From noreply at buildbot.pypy.org Fri Mar 28 22:24:38 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 28 Mar 2014 22:24:38 +0100 (CET) Subject: [pypy-commit] lang-smalltalk storage: Extracted small library to parse jit traces. Message-ID: <20140328212438.BE5F21D29DC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r721:c7f1757381ce Date: 2014-03-28 22:24 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/c7f1757381ce/ Log: Extracted small library to parse jit traces. Used that library for small script to split a trace file into multiple parts for better analysis. 
diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -1,14 +1,9 @@ import subprocess import os - -# TODO: -from rpython.tool.jitlogparser.parser import SimpleParser, Op -from rpython.tool.jitlogparser.storage import LoopStorage - +from rpython.tool.jitlogparser.parser import Op from rpython.jit.metainterp.resoperation import opname from rpython.jit.tool import oparser -from rpython.tool import logparser - +from spyvm.tool import logparser BasePath = os.path.abspath( os.path.join( @@ -21,26 +16,15 @@ class BaseJITTest(object): def run(self, spy, tmpdir, code): + logfile = str(tmpdir.join("x.pypylog")) proc = subprocess.Popen( [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage], cwd=str(tmpdir), - env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog"), + env={"PYPYLOG": "jit-log-opt:%s" % logfile, "SDL_VIDEODRIVER": "dummy"} ) proc.wait() - data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False) - data = logparser.extract_category(data, "jit-log-opt-") - - storage = LoopStorage() - traces = [SimpleParser.parse_from_input(t) for t in data] - main_loops = storage.reconnect_loops(traces) - traces_w = [] - for trace in traces: - if trace in main_loops: - traces_w.append(Trace(trace)) - else: - traces_w[len(traces_w) - 1].addbridge(trace) - return traces_w + return logparser.extract_traces(logfile) def assert_matches(self, trace, expected): expected_lines = [ @@ -65,7 +49,6 @@ aliases[arg] = arg = expected_arg assert arg == expected_arg - class Parser(oparser.OpParser): def get_descr(self, poss_descr, allow_invent): if poss_descr.startswith(("TargetToken", " Author: Anton Gulenko Branch: storage Changeset: r722:55415cc1047d Date: 2014-03-29 00:01 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/55415cc1047d/ Log: Changed sentinel-value of Float-storage to a regular float (max_float). The longlong/lltype cast operations take too long. diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1,4 +1,4 @@ -import weakref +import sys, weakref from spyvm import model, constants, error, wrapper, version from spyvm.version import elidable_for_version, constant_for_version from rpython.tool.pairtype import extendabletype @@ -141,9 +141,7 @@ class FloatOrNilStorageShadow(AbstractStorageShadow): repr_classname = "FloatOrNilStorageShadow" - # TODO -- use another value... something like max_float? 
- nil_value = runpack("d", "\x10\x00\x00\x00\x00\x00\xf8\x7f") - nil_value_longlong = longlong2float.float2longlong(nil_value) + nil_value = sys.float_info.max wrapper_class = model.W_Float import_from_mixin(AbstractValueOrNilStorageMixin) @@ -152,7 +150,7 @@ return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) @staticmethod def is_nil_value(val): - return longlong2float.float2longlong(val) == FloatOrNilStorageShadow.nil_value_longlong + return val == FloatOrNilStorageShadow.nil_value @staticmethod def wrap(space, val): return space.wrap_float(val) From noreply at buildbot.pypy.org Sat Mar 29 11:45:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 29 Mar 2014 11:45:07 +0100 (CET) Subject: [pypy-commit] pypy default: RPythonify the findall and finditer methods (hopefully) Message-ID: <20140329104507.8464A1D29E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70322:285b8bfe8e26 Date: 2014-03-29 11:44 +0100 http://bitbucket.org/pypy/pypy/changeset/285b8bfe8e26/ Log: RPythonify the findall and finditer methods (hopefully) diff --git a/rpython/rlib/rsre/rsre_re.py b/rpython/rlib/rsre/rsre_re.py --- a/rpython/rlib/rsre/rsre_re.py +++ b/rpython/rlib/rsre/rsre_re.py @@ -71,7 +71,11 @@ def findall(self, string, pos=0, endpos=sys.maxint): matchlist = [] - for match in self.finditer(string, pos, endpos): + scanner = self.scanner(string, pos, endpos) + while True: + match = scanner.search() + if match is None: + break if self.groups == 0 or self.groups == 1: item = match.group(self.groups) else: @@ -80,7 +84,12 @@ return matchlist def finditer(self, string, pos=0, endpos=sys.maxint): - return iter(self.scanner(string, pos, endpos).search, None) + scanner = self.scanner(string, pos, endpos) + while True: + match = scanner.search() + if match is None: + break + yield match def subn(self, repl, string, count=0): filter = repl From noreply at buildbot.pypy.org Sat Mar 29 12:09:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 29 Mar 2014 12:09:31 +0100 (CET) Subject: [pypy-commit] pypy default: Work a bit harder to get valid RPython code for some functions. Message-ID: <20140329110931.E9F641C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70323:3c8fd2f8aba0 Date: 2014-03-29 12:08 +0100 http://bitbucket.org/pypy/pypy/changeset/3c8fd2f8aba0/ Log: Work a bit harder to get valid RPython code for some functions. diff --git a/rpython/rlib/rsre/rsre_re.py b/rpython/rlib/rsre/rsre_re.py --- a/rpython/rlib/rsre/rsre_re.py +++ b/rpython/rlib/rsre/rsre_re.py @@ -1,12 +1,14 @@ """ -Testing code. This is not used in a PyPy translation. -It exports the same interface as the Python 're' module. +This is not used in a PyPy translation, but it can be used +in RPython code (at least the functions at the start of the +module, except the ones with NOT_RPYTHON). It exports the +same interface as the Python 're' module. 
""" import re, sys from rpython.rlib.rsre import rsre_core, rsre_char from rpython.rlib.rsre.rpy import get_code as _get_code from rpython.rlib.unicodedata import unicodedb -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, we_are_translated rsre_char.set_unicode_db(unicodedb) @@ -18,24 +20,31 @@ X = VERBOSE = re.X # ignore whitespace and comments + at specialize.call_location() def match(pattern, string, flags=0): return compile(pattern, flags).match(string) + at specialize.call_location() def search(pattern, string, flags=0): return compile(pattern, flags).search(string) + at specialize.call_location() def findall(pattern, string, flags=0): return compile(pattern, flags).findall(string) + at specialize.call_location() def finditer(pattern, string, flags=0): return compile(pattern, flags).finditer(string) def sub(pattern, repl, string, count=0): + "NOT_RPYTHON" return compile(pattern).sub(repl, string, count) def subn(pattern, repl, string, count=0): + "NOT_RPYTHON" return compile(pattern).subn(repl, string, count) + at specialize.call_location() def split(pattern, string, maxsplit=0): return compile(pattern).split(string, maxsplit) @@ -79,6 +88,8 @@ if self.groups == 0 or self.groups == 1: item = match.group(self.groups) else: + assert False, ("findall() not supported if there is more " + "than one group: not valid RPython") item = match.groups("") matchlist.append(item) return matchlist @@ -92,6 +103,7 @@ yield match def subn(self, repl, string, count=0): + "NOT_RPYTHON" filter = repl if not callable(repl) and "\\" in repl: # handle non-literal strings; hand it over to the template compiler @@ -139,6 +151,7 @@ return item, n def sub(self, repl, string, count=0): + "NOT_RPYTHON" item, n = self.subn(repl, string, count) return item @@ -221,7 +234,9 @@ grp = self.group(i) if grp is None: grp = default grps.append(grp) - return tuple(grps) + if not we_are_translated(): + grps = tuple(grps) # xxx mostly to make tests happy + return grps def groupdict(self, default=None): d = {} diff --git a/rpython/rlib/rsre/test/test_re.py b/rpython/rlib/rsre/test/test_re.py --- a/rpython/rlib/rsre/test/test_re.py +++ b/rpython/rlib/rsre/test/test_re.py @@ -191,11 +191,15 @@ assert re.findall(":+", "abc") == [] assert re.findall(":+", "a:b::c:::d") == [":", "::", ":::"] assert re.findall("(:+)", "a:b::c:::d") == [":", "::", ":::"] + + def test_re_findall_2(self): + py.test.skip("findall() returning groups is not RPython") assert re.findall("(:)(:*)", "a:b::c:::d") == [(":", ""), (":", ":"), (":", "::")] def test_bug_117612(self): + py.test.skip("findall() returning groups is not RPython") assert re.findall(r"(a|(b))", "aba") == ( [("a", ""),("b", "b"),("a", "")]) diff --git a/rpython/rlib/rsre/test/test_zinterp.py b/rpython/rlib/rsre/test/test_zinterp.py --- a/rpython/rlib/rsre/test/test_zinterp.py +++ b/rpython/rlib/rsre/test/test_zinterp.py @@ -35,3 +35,23 @@ return int("aaaaaa" == g.group(0)) assert interpret(f, [3]) == 1 assert interpret(f, [0]) == 3 + +def test_translates(): + from rpython.rlib.rsre import rsre_re + def f(i): + if i: + s = "aaaaaa" + else: + s = "caaaaa" + print rsre_re.match("(a|b)aa", s) + print rsre_re.match("a{4}", s) + print rsre_re.search("(a|b)aa", s) + print rsre_re.search("a{4}", s) + for x in rsre_re.findall("(a|b)a", s): print x + for x in rsre_re.findall("a{2}", s): print x + for x in rsre_re.finditer("(a|b)a", s): print x + for x in rsre_re.finditer("a{2}", s): print x + for x in rsre_re.split("(a|b)a", s): print x + for x in 
rsre_re.split("a{2}", s): print x + return 0 + interpret(f, [3]) # assert does not crash From noreply at buildbot.pypy.org Sat Mar 29 12:13:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 29 Mar 2014 12:13:36 +0100 (CET) Subject: [pypy-commit] pypy default: Document better Message-ID: <20140329111336.C2D711C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70324:7a9f73156fdc Date: 2014-03-29 12:13 +0100 http://bitbucket.org/pypy/pypy/changeset/7a9f73156fdc/ Log: Document better diff --git a/rpython/rlib/rsre/rsre_re.py b/rpython/rlib/rsre/rsre_re.py --- a/rpython/rlib/rsre/rsre_re.py +++ b/rpython/rlib/rsre/rsre_re.py @@ -1,8 +1,9 @@ """ This is not used in a PyPy translation, but it can be used -in RPython code (at least the functions at the start of the -module, except the ones with NOT_RPYTHON). It exports the -same interface as the Python 're' module. +in RPython code. It exports the same interface as the +Python 're' module. You can call the functions at the start +of the module (expect the ones with NOT_RPYTHON for now). +They must be called with a *constant* pattern string. """ import re, sys from rpython.rlib.rsre import rsre_core, rsre_char From noreply at buildbot.pypy.org Sat Mar 29 12:48:52 2014 From: noreply at buildbot.pypy.org (krono) Date: Sat, 29 Mar 2014 12:48:52 +0100 (CET) Subject: [pypy-commit] pypy default: Be a bit more narrative about what can create a closure Message-ID: <20140329114852.430571D2351@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r70325:33a9aeb0b05e Date: 2014-03-29 12:47 +0100 http://bitbucket.org/pypy/pypy/changeset/33a9aeb0b05e/ Log: Be a bit more narrative about what can create a closure diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -16,7 +16,13 @@ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): raise ValueError("%r is tagged as NOT_RPYTHON" % (func,)) if func.func_code.co_cellvars: - raise ValueError("RPython functions cannot create closures") + raise ValueError( +"""RPython functions cannot create closures +Possible casues: + Function is inner function + Function uses generator expressions + Lambda expressions +""") if not (func.func_code.co_flags & CO_NEWLOCALS): raise ValueError("The code object for a RPython function should have " "the flag CO_NEWLOCALS set.") From noreply at buildbot.pypy.org Sat Mar 29 12:56:02 2014 From: noreply at buildbot.pypy.org (krono) Date: Sat, 29 Mar 2014 12:56:02 +0100 (CET) Subject: [pypy-commit] pypy default: Include hint to which function is not rpythonic Message-ID: <20140329115602.4C1621D29E3@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r70326:d0950a7fb51a Date: 2014-03-29 12:55 +0100 http://bitbucket.org/pypy/pypy/changeset/d0950a7fb51a/ Log: Include hint to which function is not rpythonic diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -22,7 +22,7 @@ Function is inner function Function uses generator expressions Lambda expressions -""") +in %r""" % (func,)) if not (func.func_code.co_flags & CO_NEWLOCALS): raise ValueError("The code object for a RPython function should have " "the flag CO_NEWLOCALS set.") From noreply at buildbot.pypy.org Sat Mar 29 15:38:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 29 Mar 2014 15:38:48 +0100 (CET) Subject: [pypy-commit] stmgc timelog: update 
Message-ID: <20140329143848.DC26E1D29E3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: timelog Changeset: r1116:9700cfc5c11b Date: 2014-03-29 15:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/9700cfc5c11b/ Log: update diff --git a/c7/timelog.txt b/c7/timelog.txt --- a/c7/timelog.txt +++ b/c7/timelog.txt @@ -41,11 +41,12 @@ higher recorded times sorted first; the entry with the lowest amount of time is dropped. -- if there are several aborts from the same transaction start, then - regroup them by traceback, and report only once with the number - of consecutive occurrences and the total time; do that before inserting - in the heapq list, as otherwise if we have a lot of quick aborts they - would all be lost as not contributing significant time individually +- if there are several aborts from what is logically the same starting + place and which ends with the same traceback, we'd like to regroup them + and report them only once with the number of consecutive occurrences + and the total time; doing that before inserting the entries in the heapq + list is needed, as otherwise if we have a lot of quick aborts they + would all be lost as not contributing significant time individually. API of stmgc.h @@ -60,6 +61,10 @@ - we have APIs to enumerate a timelog's current entries, and enumerate each traceback's recorded frames. +- we can get directly the traceback of the transaction that aborted just + now. If the user determines it should be merged with an older + traceback, he calls a merge function. + Tracebacks ========== From noreply at buildbot.pypy.org Sat Mar 29 15:58:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 29 Mar 2014 15:58:56 +0100 (CET) Subject: [pypy-commit] stmgc timelog: Simplify the API a lot, by considering that recording several logs is Message-ID: <20140329145856.255501D29DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: timelog Changeset: r1117:3649d0fe67de Date: 2014-03-29 15:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/3649d0fe67de/ Log: Simplify the API a lot, by considering that recording several logs is a job outside the scope of stmgc. diff --git a/c7/timelog.txt b/c7/timelog.txt --- a/c7/timelog.txt +++ b/c7/timelog.txt @@ -37,33 +37,27 @@ (less than 5 times slower than a RDTSC instruction, which is itself not safe in the presence of threads migrating among CPUs) -- record a fixed number of entries, as a fixed-size heapq list, with - higher recorded times sorted first; the entry with the lowest amount - of time is dropped. - -- if there are several aborts from what is logically the same starting +- record only the highest-time entry. The user of the library is + responsible for getting and clearing it often enough if it wants + more details. [[ For example, it may want to maintain a heapq + with several entries, but that's not the job of stmgc. Moreover, + if there are several aborts from what is logically the same starting place and which ends with the same traceback, we'd like to regroup them and report them only once with the number of consecutive occurrences and the total time; doing that before inserting the entries in the heapq list is needed, as otherwise if we have a lot of quick aborts they would all be lost as not contributing significant time individually. + ]] API of stmgc.h ============== -- timelogs are always thread-local. We have APIs to create, clear and - destroy them; recorded entries go to all active timelogs of this thread. 
- - the traceback reports are based on the user of the library pushing and popping stack entries to the current stack in every thread. -- we have APIs to enumerate a timelog's current entries, and enumerate - each traceback's recorded frames. - -- we can get directly the traceback of the transaction that aborted just - now. If the user determines it should be merged with an older - traceback, he calls a merge function. +- we have a simple API to get and clear the traceback and reason of the + single report with the highest recorded time, for the current thread. Tracebacks @@ -77,12 +71,11 @@ the common case of objects freed shortly after being allocated. We record one traceback pointer for every old object written during this -transaction. It could be avoided only if we are running with no timelog -at all (not just none in this thread), but it's probably not worth the +transaction. It could be avoided only if we disable all reports (not +just the one in this thread), but it's probably not worth the optimization. This is all thread-local, with the exception of when we record another thread's traceback. To implement this, we clone the complete traceback into the other thread's local allocator. It should be fine because it -is only needed when we have already determined that this entry has an -important enough recorded time to be worth storing. +is only needed once, on abort. From noreply at buildbot.pypy.org Sat Mar 29 16:49:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 29 Mar 2014 16:49:29 +0100 (CET) Subject: [pypy-commit] stmgc timelog: Getting started Message-ID: <20140329154929.7B6BB1D2351@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: timelog Changeset: r1118:3acb2c19742b Date: 2014-03-29 16:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/3acb2c19742b/ Log: Getting started diff --git a/c7/stm/timelog.c b/c7/stm/timelog.c new file mode 100644 --- /dev/null +++ b/c7/stm/timelog.c @@ -0,0 +1,17 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +stm_timelog_t *stm_fetch_and_remove_timelog(stm_thread_local_t *tl) +{ + stm_timelog_t *tlog = tl->last_tlog; + tl->last_tlog = NULL; + return tlog; +} + +void stm_free_timelog(stm_timelog_t *tlog) +{ + OPT_ASSERT(tlog != NULL); + free(tlog); +} diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -31,3 +31,4 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" +#include "stm/timelog.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -70,6 +70,7 @@ int associated_segment_num; struct stm_thread_local_s *prev, *next; void *creating_pthread[2]; + struct stm_timelog_s *last_tlog; } stm_thread_local_t; /* this should use llvm's coldcc calling convention, @@ -336,6 +337,35 @@ const char *msg); +/* ---------- timelogs ---------- */ + +enum { + STLOG_REASON_UNKNOWN, + STLOG_REASON_ABORT_SELF, + STLOG_REASON_ABORT_OTHER, + STLOG_REASON_PAUSE, +}; +enum { + STLOG_CONTENTION_NONE, + STLOG_CONTENTION_WRITE_WRITE, + STLOG_CONTENTION_WRITE_READ, + STLOG_CONTENTION_INEVITABLE, +}; + +typedef struct stm_timelog_s { + uint8_t reason; + uint8_t contention; + int user; + double time_lost; + //stm_traceback_t *traceback_self; + //stm_traceback_t *traceback_other; +} stm_timelog_t; + +/* XXX maybe inline these functions if they turn out to be trivial */ +stm_timelog_t *stm_fetch_and_remove_timelog(stm_thread_local_t *); +void stm_free_timelog(stm_timelog_t *); + + /* ==================== END ==================== */ #endif diff --git 
a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -98,6 +98,26 @@ int stm_can_move(object_t *); void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); + +#define STLOG_REASON_UNKNOWN ... +#define STLOG_REASON_ABORT_SELF ... +#define STLOG_REASON_ABORT_OTHER ... +#define STLOG_REASON_PAUSE ... +#define STLOG_CONTENTION_NONE ... +#define STLOG_CONTENTION_WRITE_WRITE ... +#define STLOG_CONTENTION_WRITE_READ ... +#define STLOG_CONTENTION_INEVITABLE ... + +typedef struct { + uint8_t reason; + uint8_t contention; + int user; + double time_lost; + ...; +} stm_timelog_t; + +stm_timelog_t *stm_fetch_and_remove_timelog(stm_thread_local_t *); +void stm_free_timelog(stm_timelog_t *); """) @@ -517,3 +537,7 @@ tl = self.tls[self.current_thread] if lib._check_become_globally_unique_transaction(tl): raise Conflict() + + def fetch_and_remove_timelog(self): + tl = self.tls[self.current_thread] + return lib.stm_fetch_and_remove_timelog(tl) diff --git a/c7/test/test_timelog.py b/c7/test/test_timelog.py new file mode 100644 --- /dev/null +++ b/c7/test/test_timelog.py @@ -0,0 +1,26 @@ +import py, time +from support import * + + +class TestTimeLog(BaseTest): + + def test_empty(self): + self.start_transaction() + tlog = self.fetch_and_remove_timelog() + assert tlog == ffi.NULL + + def test_simple_abort(self): + self.start_transaction() + start = time.time() + while abs(time.time() - start) <= 0.05: + pass + self.abort_transaction() + # + self.start_transaction() + tlog = self.fetch_and_remove_timelog() + assert tlog != ffi.NULL + assert tlog.reason == lib.STLOG_REASON_UNKNOWN + assert tlog.contention == lib.STLOG_CONTENTION_NONE + assert tlog.user == 0 + assert 0.0499 <= tlog.time_lost < 1.0 + lib.stm_free_timelog(tlog) From noreply at buildbot.pypy.org Sat Mar 29 20:54:37 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 29 Mar 2014 20:54:37 +0100 (CET) Subject: [pypy-commit] pypy default: compile test_capi once for a version of pypy and the c file Message-ID: <20140329195437.73EF31D29DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70327:ac3fb021b26a Date: 2014-03-28 17:17 +0300 http://bitbucket.org/pypy/pypy/changeset/ac3fb021b26a/ Log: compile test_capi once for a version of pypy and the c file diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -7,12 +7,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. 
""" thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,30 @@ +import sys, tempfile, imp, binascii, os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: + +def get_hashed_dir(cfile): + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], cfile]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + +cfile = '_testcapimodule.c' +output_dir = get_hashed_dir(cfile) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + imp.load_module('_testcapi', fp, filename, description) +except ImportError: import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) From noreply at buildbot.pypy.org Sat Mar 29 20:54:38 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 29 Mar 2014 20:54:38 +0100 (CET) Subject: [pypy-commit] pypy default: close file after use Message-ID: <20140329195438.B27F51D29DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70328:c3a274d4f042 Date: 2014-03-29 21:05 +0300 http://bitbucket.org/pypy/pypy/changeset/c3a274d4f042/ Log: close file after use diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,6 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) + f.close() def test_head(self): response = self.request( From noreply at buildbot.pypy.org Sat Mar 29 20:54:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 29 Mar 2014 20:54:40 +0100 (CET) Subject: [pypy-commit] pypy default: hash _testcapi.c file contents, and add test Message-ID: <20140329195440.03A351D29DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70329:31a0923cb46d Date: 2014-03-29 22:50 +0300 http://bitbucket.org/pypy/pypy/changeset/31a0923cb46d/ Log: hash _testcapi.c file contents, and add test diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -6,8 +6,10 @@ raise ImportError("No module named '_testcapi'") def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() # from cffi's Verifier() - key = '\x00'.join([sys.version[:3], cfile]) + key = '\x00'.join([sys.version[:3], content]) if sys.version_info >= (3,): key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) @@ -20,7 +22,8 @@ return output_dir cfile = '_testcapimodule.c' -output_dir = get_hashed_dir(cfile) +thisdir = os.path.dirname(__file__) +output_dir = get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) diff --git a/pypy/module/test_lib_pypy/test_testcapi.py b/pypy/module/test_lib_pypy/test_testcapi.py new file mode 100644 --- /dev/null +++ 
b/pypy/module/test_lib_pypy/test_testcapi.py @@ -0,0 +1,17 @@ +import py, sys + +if '__pypy__' not in sys.builtin_module_names: + py.test.skip('pypy only test') + +from lib_pypy import _testcapi #this should insure _testcapi is built + +def test_get_hashed_dir(): + import sys + script = '''import _testcapi + assert 'get_hashed_dir' in dir(_testcapi) + return 0 + ''' + output = py.process.cmdexec('''"%s" -c "%s"''' % + (sys.executable, script)) + assert output == '' + From noreply at buildbot.pypy.org Sat Mar 29 22:27:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 29 Mar 2014 22:27:45 +0100 (CET) Subject: [pypy-commit] pypy default: fix test Message-ID: <20140329212745.2AB851D29E1@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70330:18d16e0a184b Date: 2014-03-30 00:26 +0300 http://bitbucket.org/pypy/pypy/changeset/18d16e0a184b/ Log: fix test diff --git a/pypy/module/test_lib_pypy/test_testcapi.py b/pypy/module/test_lib_pypy/test_testcapi.py --- a/pypy/module/test_lib_pypy/test_testcapi.py +++ b/pypy/module/test_lib_pypy/test_testcapi.py @@ -3,14 +3,12 @@ if '__pypy__' not in sys.builtin_module_names: py.test.skip('pypy only test') -from lib_pypy import _testcapi #this should insure _testcapi is built +from lib_pypy import _testcapi #make sure _testcapi is built def test_get_hashed_dir(): import sys - script = '''import _testcapi - assert 'get_hashed_dir' in dir(_testcapi) - return 0 - ''' + # This should not compile _testcapi, so the output is empty + script = "import _testcapi; assert 'get_hashed_dir' in dir(_testcapi)" output = py.process.cmdexec('''"%s" -c "%s"''' % (sys.executable, script)) assert output == '' From noreply at buildbot.pypy.org Sun Mar 30 05:17:12 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 05:17:12 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes4: merge default into branch Message-ID: <20140330031712.47FAB1D29DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70331:bc9fdf95881a Date: 2014-03-30 01:10 +0300 http://bitbucket.org/pypy/pypy/changeset/bc9fdf95881a/ Log: merge default into branch diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,6 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) + f.close() def test_head(self): response = self.request( diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -6,8 +6,10 @@ raise ImportError("No module named '_testcapi'") def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() # from cffi's Verifier() - key = '\x00'.join([sys.version[:3], cfile]) + key = '\x00'.join([sys.version[:3], content]) if sys.version_info >= (3,): key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) @@ -20,7 +22,8 @@ return output_dir cfile = '_testcapimodule.c' -output_dir = get_hashed_dir(cfile) +thisdir = os.path.dirname(__file__) +output_dir = get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -14,7 +14,7 @@ The present document describes the specific garbage collectors that we wrote in 
our framework. -.. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU-report on this topic`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf Garbage collectors currently written for the GC framework diff --git a/pypy/module/test_lib_pypy/test_testcapi.py b/pypy/module/test_lib_pypy/test_testcapi.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_testcapi.py @@ -0,0 +1,15 @@ +import py, sys + +if '__pypy__' not in sys.builtin_module_names: + py.test.skip('pypy only test') + +from lib_pypy import _testcapi #make sure _testcapi is built + +def test_get_hashed_dir(): + import sys + # This should not compile _testcapi, so the output is empty + script = "import _testcapi; assert 'get_hashed_dir' in dir(_testcapi)" + output = py.process.cmdexec('''"%s" -c "%s"''' % + (sys.executable, script)) + assert output == '' + diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -16,7 +16,13 @@ if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): raise ValueError("%r is tagged as NOT_RPYTHON" % (func,)) if func.func_code.co_cellvars: - raise ValueError("RPython functions cannot create closures") + raise ValueError( +"""RPython functions cannot create closures +Possible casues: + Function is inner function + Function uses generator expressions + Lambda expressions +in %r""" % (func,)) if not (func.func_code.co_flags & CO_NEWLOCALS): raise ValueError("The code object for a RPython function should have " "the flag CO_NEWLOCALS set.") diff --git a/rpython/rlib/rsre/rsre_re.py b/rpython/rlib/rsre/rsre_re.py --- a/rpython/rlib/rsre/rsre_re.py +++ b/rpython/rlib/rsre/rsre_re.py @@ -1,12 +1,15 @@ """ -Testing code. This is not used in a PyPy translation. -It exports the same interface as the Python 're' module. +This is not used in a PyPy translation, but it can be used +in RPython code. It exports the same interface as the +Python 're' module. You can call the functions at the start +of the module (expect the ones with NOT_RPYTHON for now). +They must be called with a *constant* pattern string. 
""" import re, sys from rpython.rlib.rsre import rsre_core, rsre_char from rpython.rlib.rsre.rpy import get_code as _get_code from rpython.rlib.unicodedata import unicodedb -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, we_are_translated rsre_char.set_unicode_db(unicodedb) @@ -18,24 +21,31 @@ X = VERBOSE = re.X # ignore whitespace and comments + at specialize.call_location() def match(pattern, string, flags=0): return compile(pattern, flags).match(string) + at specialize.call_location() def search(pattern, string, flags=0): return compile(pattern, flags).search(string) + at specialize.call_location() def findall(pattern, string, flags=0): return compile(pattern, flags).findall(string) + at specialize.call_location() def finditer(pattern, string, flags=0): return compile(pattern, flags).finditer(string) def sub(pattern, repl, string, count=0): + "NOT_RPYTHON" return compile(pattern).sub(repl, string, count) def subn(pattern, repl, string, count=0): + "NOT_RPYTHON" return compile(pattern).subn(repl, string, count) + at specialize.call_location() def split(pattern, string, maxsplit=0): return compile(pattern).split(string, maxsplit) @@ -71,18 +81,30 @@ def findall(self, string, pos=0, endpos=sys.maxint): matchlist = [] - for match in self.finditer(string, pos, endpos): + scanner = self.scanner(string, pos, endpos) + while True: + match = scanner.search() + if match is None: + break if self.groups == 0 or self.groups == 1: item = match.group(self.groups) else: + assert False, ("findall() not supported if there is more " + "than one group: not valid RPython") item = match.groups("") matchlist.append(item) return matchlist def finditer(self, string, pos=0, endpos=sys.maxint): - return iter(self.scanner(string, pos, endpos).search, None) + scanner = self.scanner(string, pos, endpos) + while True: + match = scanner.search() + if match is None: + break + yield match def subn(self, repl, string, count=0): + "NOT_RPYTHON" filter = repl if not callable(repl) and "\\" in repl: # handle non-literal strings; hand it over to the template compiler @@ -130,6 +152,7 @@ return item, n def sub(self, repl, string, count=0): + "NOT_RPYTHON" item, n = self.subn(repl, string, count) return item @@ -212,7 +235,9 @@ grp = self.group(i) if grp is None: grp = default grps.append(grp) - return tuple(grps) + if not we_are_translated(): + grps = tuple(grps) # xxx mostly to make tests happy + return grps def groupdict(self, default=None): d = {} diff --git a/rpython/rlib/rsre/test/test_re.py b/rpython/rlib/rsre/test/test_re.py --- a/rpython/rlib/rsre/test/test_re.py +++ b/rpython/rlib/rsre/test/test_re.py @@ -191,11 +191,15 @@ assert re.findall(":+", "abc") == [] assert re.findall(":+", "a:b::c:::d") == [":", "::", ":::"] assert re.findall("(:+)", "a:b::c:::d") == [":", "::", ":::"] + + def test_re_findall_2(self): + py.test.skip("findall() returning groups is not RPython") assert re.findall("(:)(:*)", "a:b::c:::d") == [(":", ""), (":", ":"), (":", "::")] def test_bug_117612(self): + py.test.skip("findall() returning groups is not RPython") assert re.findall(r"(a|(b))", "aba") == ( [("a", ""),("b", "b"),("a", "")]) diff --git a/rpython/rlib/rsre/test/test_zinterp.py b/rpython/rlib/rsre/test/test_zinterp.py --- a/rpython/rlib/rsre/test/test_zinterp.py +++ b/rpython/rlib/rsre/test/test_zinterp.py @@ -35,3 +35,23 @@ return int("aaaaaa" == g.group(0)) assert interpret(f, [3]) == 1 assert interpret(f, [0]) == 3 + +def test_translates(): + from rpython.rlib.rsre import rsre_re 
+ def f(i): + if i: + s = "aaaaaa" + else: + s = "caaaaa" + print rsre_re.match("(a|b)aa", s) + print rsre_re.match("a{4}", s) + print rsre_re.search("(a|b)aa", s) + print rsre_re.search("a{4}", s) + for x in rsre_re.findall("(a|b)a", s): print x + for x in rsre_re.findall("a{2}", s): print x + for x in rsre_re.finditer("(a|b)a", s): print x + for x in rsre_re.finditer("a{2}", s): print x + for x in rsre_re.split("(a|b)a", s): print x + for x in rsre_re.split("a{2}", s): print x + return 0 + interpret(f, [3]) # assert does not crash From noreply at buildbot.pypy.org Sun Mar 30 05:17:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 05:17:13 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes4: give up on poll() for win32, rely on lib-python -A test_ftplib for verification Message-ID: <20140330031713.A78341D29DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70332:02e471453e7b Date: 2014-03-30 06:12 +0300 http://bitbucket.org/pypy/pypy/changeset/02e471453e7b/ Log: give up on poll() for win32, rely on lib-python -A test_ftplib for verification diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -35,7 +35,7 @@ SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3 SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5 -HAVE_RPOLL = True # Even win32 has rpoll.poll +HAVE_RPOLL = 'poll' in dir(rpoll) constants = {} constants["SSL_ERROR_ZERO_RETURN"] = PY_SSL_ERROR_ZERO_RETURN diff --git a/rpython/rlib/rpoll.py b/rpython/rlib/rpoll.py --- a/rpython/rlib/rpoll.py +++ b/rpython/rlib/rpoll.py @@ -141,8 +141,9 @@ # poll() for Win32 # if hasattr(_c, 'WSAEventSelect'): - - def poll(fddict, timeout=-1): + # WSAWaitForMultipleEvents is broken. If you wish to try it, + # rename the function to poll() and run test_exchange in test_rpoll + def _poll(fddict, timeout=-1): """'fddict' maps file descriptors to interesting events. 'timeout' is an integer in milliseconds, and NOT a float number of seconds, but it's the same in CPython. Use -1 for infinite. 
@@ -188,6 +189,7 @@ if timeout < 0: timeout = _c.INFINITE + # XXX does not correctly report write status of a port ret = _c.WSAWaitForMultipleEvents(numevents, socketevents, False, timeout, False) diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -3,7 +3,10 @@ import py from rpython.rlib.rsocket import * -from rpython.rlib.rpoll import * +try: + from rpython.rlib.rpoll import poll +except ImportError: + py.test.skip('no poll available on this platform') from rpython.rtyper.test.test_llinterp import interpret def setup_module(mod): @@ -62,6 +65,8 @@ serv.close() def test_exchange(): + if not poll: + py.test.skip('poll not available for this platform') serv = RSocket(AF_INET, SOCK_STREAM) serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) serv.listen(1) @@ -71,9 +76,9 @@ assert len(events) == 0 cli = RSocket(AF_INET, SOCK_STREAM) - cli.setblocking(False) + cli.setblocking(True) err = cli.connect_ex(servaddr) - assert err != 0 + assert err == 0 events = poll({serv.fd: POLLIN}, timeout=500) one_in_event(events, serv.fd) From noreply at buildbot.pypy.org Sun Mar 30 05:17:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 05:17:14 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes4: close for re-merging to default Message-ID: <20140330031714.D86691D29DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: win32-fixes4 Changeset: r70333:2cdacb7ce582 Date: 2014-03-30 06:14 +0300 http://bitbucket.org/pypy/pypy/changeset/2cdacb7ce582/ Log: close for re-merging to default From noreply at buildbot.pypy.org Sun Mar 30 05:17:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 05:17:16 +0200 (CEST) Subject: [pypy-commit] pypy default: remerge branch, use select() rather than poll() for ssl on win32 Message-ID: <20140330031716.27CD61D29DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70334:7fbef7602628 Date: 2014-03-30 06:15 +0300 http://bitbucket.org/pypy/pypy/changeset/7fbef7602628/ Log: remerge branch, use select() rather than poll() for ssl on win32 diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -64,6 +64,7 @@ ^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ +^pypy/goal/.+\.lib$ ^pypy/_cache$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -35,7 +35,7 @@ SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3 SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5 -HAVE_RPOLL = True # Even win32 has rpoll.poll +HAVE_RPOLL = 'poll' in dir(rpoll) constants = {} constants["SSL_ERROR_ZERO_RETURN"] = PY_SSL_ERROR_ZERO_RETURN diff --git a/rpython/rlib/rpoll.py b/rpython/rlib/rpoll.py --- a/rpython/rlib/rpoll.py +++ b/rpython/rlib/rpoll.py @@ -141,8 +141,9 @@ # poll() for Win32 # if hasattr(_c, 'WSAEventSelect'): - - def poll(fddict, timeout=-1): + # WSAWaitForMultipleEvents is broken. If you wish to try it, + # rename the function to poll() and run test_exchange in test_rpoll + def _poll(fddict, timeout=-1): """'fddict' maps file descriptors to interesting events. 'timeout' is an integer in milliseconds, and NOT a float number of seconds, but it's the same in CPython. Use -1 for infinite. 
@@ -188,6 +189,7 @@ if timeout < 0: timeout = _c.INFINITE + # XXX does not correctly report write status of a port ret = _c.WSAWaitForMultipleEvents(numevents, socketevents, False, timeout, False) diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -3,12 +3,25 @@ import py from rpython.rlib.rsocket import * -from rpython.rlib.rpoll import * +try: + from rpython.rlib.rpoll import poll +except ImportError: + py.test.skip('no poll available on this platform') from rpython.rtyper.test.test_llinterp import interpret def setup_module(mod): rsocket_startup() +def one_in_event(events, fd): + assert len(events) == 1 + assert events[0][0] == fd + assert events[0][1] & POLLIN + +def one_out_event(events, fd): + assert len(events) == 1 + assert events[0][0] == fd + assert events[0][1] & POLLOUT + def test_simple(): serv = RSocket(AF_INET, SOCK_STREAM) serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) @@ -24,18 +37,14 @@ assert err != 0 events = poll({serv.fd: POLLIN}, timeout=500) - assert len(events) == 1 - assert events[0][0] == serv.fd - assert events[0][1] & POLLIN + one_in_event(events, serv.fd) servconn_fd, cliaddr = serv.accept() servconn = RSocket(AF_INET, fd=servconn_fd) events = poll({serv.fd: POLLIN, cli.fd: POLLOUT}, timeout=500) - assert len(events) == 1 - assert events[0][0] == cli.fd - assert events[0][1] & POLLOUT + one_out_event(events, cli.fd) err = cli.connect_ex(servaddr) # win32: returns WSAEISCONN when the connection finally succeed. @@ -55,6 +64,72 @@ servconn.close() serv.close() +def test_exchange(): + if not poll: + py.test.skip('poll not available for this platform') + serv = RSocket(AF_INET, SOCK_STREAM) + serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) + serv.listen(1) + servaddr = serv.getsockname() + + events = poll({serv.fd: POLLIN}, timeout=100) + assert len(events) == 0 + + cli = RSocket(AF_INET, SOCK_STREAM) + cli.setblocking(True) + err = cli.connect_ex(servaddr) + assert err == 0 + + events = poll({serv.fd: POLLIN}, timeout=500) + one_in_event(events, serv.fd) + + servconn_fd, cliaddr = serv.accept() + servconn = RSocket(AF_INET, fd=servconn_fd) + + events = poll({serv.fd: POLLIN, + cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + + #send some data + events = poll({cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + cli.send("g'day, mate") + events = poll({servconn.fd: POLLIN}, timeout=500) + one_in_event(events, servconn.fd) + answer = servconn.recv(1024) + assert answer == "g'day, mate" + + #send a reply + events = poll({servconn.fd: POLLOUT}, timeout=500) + one_out_event(events, servconn.fd) + servconn.send("you mean hello?") + events = poll({cli.fd: POLLIN}, timeout=500) + one_in_event(events, cli.fd) + answer = cli.recv(1024) + assert answer == "you mean hello?" 
+ + #send more data + events = poll({cli.fd: POLLOUT}, timeout=500) + one_out_event(events, cli.fd) + cli.send("sorry, wrong channel") + events = poll({servconn.fd: POLLIN}, timeout=500) + one_in_event(events, servconn.fd) + answer = servconn.recv(1024) + assert answer == "sorry, wrong channel" + + events = poll({servconn.fd: POLLOUT}, timeout=500) + one_out_event(events, servconn.fd) + servconn.send("np bye") + events = poll({cli.fd: POLLIN}, timeout=500) + one_in_event(events, cli.fd) + answer = cli.recv(1024) + assert answer == "np bye" + + cli.close() + servconn.close() + serv.close() + + def test_select(): if os.name == 'nt': py.test.skip('cannot select on file handles on windows') From noreply at buildbot.pypy.org Sun Mar 30 08:54:09 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 08:54:09 +0200 (CEST) Subject: [pypy-commit] pypy default: whoops Message-ID: <20140330065409.890D81C1154@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70335:3ff23b5bb135 Date: 2014-03-30 09:50 +0300 http://bitbucket.org/pypy/pypy/changeset/3ff23b5bb135/ Log: whoops diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -3,6 +3,7 @@ import py from rpython.rlib.rsocket import * +from rpython.rlib.rpoll import select try: from rpython.rlib.rpoll import poll except ImportError: From noreply at buildbot.pypy.org Sun Mar 30 19:14:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 19:14:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: Starting on reporting timings, more directly than in the 'timelog' Message-ID: <20140330171402.D71001C02AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1119:e1c90b6ac3f8 Date: 2014-03-30 19:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/e1c90b6ac3f8/ Log: Starting on reporting timings, more directly than in the 'timelog' branch but without tracebacks diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -17,22 +17,20 @@ H_FILES = ../stmgc.h ../stm/*.h C_FILES = ../stmgc.c ../stm/*.c +COMMON = -I.. -pthread -lrt -g -Wall -Werror + # note that 'build' is partially optimized but still contains all asserts debug-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -DSTM_DEBUGPRINT -DSTM_GC_NURSERY=128 -g -O0 \ - $< -o debug-$* -Wall -Werror ../stmgc.c + clang $(COMMON) -DSTM_DEBUGPRINT -DSTM_GC_NURSERY=128 -O0 \ + $< -o debug-$* ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -DSTM_GC_NURSERY=128 -g -O1 \ - $< -o build-$* -Wall -Werror ../stmgc.c + clang $(COMMON) -DSTM_GC_NURSERY=128 -O1 $< -o build-$* ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-$* \ - -Wall -Werror ../stmgc.c + clang $(COMMON) -DNDEBUG -O2 $< -o release-$* ../stmgc.c release-htm-%: %.c ../../htm-c7/stmgc.? ../../htm-c7/htm.h - clang -I.. 
-pthread -g -O2 $< -o release-htm-$* ../../htm-c7/stmgc.c -Wall -DUSE_HTM - - + clang $(COMMON) -O2 $< -o release-htm-$* ../../htm-c7/stmgc.c -DUSE_HTM diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -172,7 +172,7 @@ retry: if (jmpbuf == NULL) { - wait_for_end_of_inevitable_transaction(false); + wait_for_end_of_inevitable_transaction(tl); } if (!acquire_thread_segment(tl)) @@ -181,6 +181,8 @@ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); + change_timing_state(STM_TIME_RUN_CURRENT); + STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR : TS_INEVITABLE); @@ -433,7 +435,7 @@ list_clear(STM_PSEGMENT->modified_old_objects); } -static void _finish_transaction(void) +static void _finish_transaction(enum stm_time_e attribute_to) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; @@ -442,6 +444,8 @@ LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->large_overflow_objects); + timing_end_transaction(attribute_to); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ @@ -505,7 +509,7 @@ } /* done */ - _finish_transaction(); + _finish_transaction(STM_TIME_RUN_COMMITTED); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); @@ -635,7 +639,7 @@ : NURSERY_END; } - _finish_transaction(); + _finish_transaction(STM_TIME_RUN_ABORTED_OTHER); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ /* Broadcast C_ABORTED to wake up contention.c */ @@ -668,7 +672,7 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); - wait_for_end_of_inevitable_transaction(true); + wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; clear_callbacks_on_abort(); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -121,7 +121,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - uint64_t start_time; + double start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -134,11 +134,15 @@ if (is_major_collection_requested()) { /* if still true */ + enum stm_time_e oldstate = change_timing_state(STM_TIME_MAJOR_GC); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); if (is_major_collection_requested()) { /* if *still* true */ major_collection_now_at_safe_point(); } + + change_timing_state(oldstate); } s_mutex_unlock(); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -318,7 +318,11 @@ stm_safe_point(); + change_timing_state(STM_TIME_MINOR_GC); + _do_minor_collection(commit); + + change_timing_state(commit ? 
STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT); } void stm_collect(long level) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -169,6 +169,8 @@ num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; + tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION; + tl->_timing_cur_start = get_stm_time(); /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -31,7 +31,6 @@ pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread */ - uint64_t global_time; }; char reserved[192]; } sync_ctl __attribute__((aligned(64))); @@ -120,13 +119,14 @@ /************************************************************/ -static void wait_for_end_of_inevitable_transaction(bool can_abort) +static void wait_for_end_of_inevitable_transaction( + stm_thread_local_t *tl_or_null_if_can_abort) { long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { - if (can_abort) { + if (tl_or_null_if_can_abort == NULL) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), or wait for a while. If we go past this call, then we @@ -137,7 +137,11 @@ else { /* wait for stm_commit_transaction() to finish this inevitable transaction */ + change_timing_state_tl(tl_or_null_if_can_abort, + STM_TIME_WAIT_INEVITABLE); cond_wait(C_INEVITABLE); + /* don't bother changing the timing state again: the caller + will very soon go to STM_TIME_RUN_CURRENT */ } goto restart; } @@ -188,7 +192,6 @@ assert(STM_SEGMENT->segment_num == num); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; - STM_PSEGMENT->start_time = ++sync_ctl.global_time; return true; } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -28,7 +28,7 @@ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); -static void wait_for_end_of_inevitable_transaction(bool can_abort); +static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, diff --git a/c7/stm/timing.c b/c7/stm/timing.c new file mode 100644 --- /dev/null +++ b/c7/stm/timing.c @@ -0,0 +1,44 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static inline void add_timing(stm_thread_local_t *tl, enum stm_time_e category, + double elapsed) +{ + tl->timing[category] += elapsed; +} + +#define TIMING_CHANGE(tl, newstate) \ + double curtime = get_stm_time(); \ + double elasped = curtime - tl->_timing_cur_start; \ + enum stm_time_e oldstate = tl->_timing_cur_state; \ + add_timing(tl, oldstate, elasped); \ + tl->_timing_cur_state = newstate; \ + tl->_timing_cur_start = curtime + +static enum stm_time_e change_timing_state(enum stm_time_e newstate) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + TIMING_CHANGE(tl, newstate); + return oldstate; +} + +static void change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) +{ + TIMING_CHANGE(tl, newstate); +} + +static void timing_end_transaction(enum stm_time_e attribute_to) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); + add_timing(tl, attribute_to, 
tl->timing[STM_TIME_RUN_CURRENT]); + tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; +} + +void stm_flush_timing(stm_thread_local_t *tl) +{ + change_timing_state_tl(tl, tl->_timing_cur_state); +} diff --git a/c7/stm/timing.h b/c7/stm/timing.h new file mode 100644 --- /dev/null +++ b/c7/stm/timing.h @@ -0,0 +1,14 @@ +#include + +static inline double get_stm_time(void) +{ + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + return tp.tv_sec + tp.tv_nsec * 0.000000001; +} + +static enum stm_time_e change_timing_state(enum stm_time_e newstate); +static void change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); + +static void timing_end_transaction(enum stm_time_e attribute_to); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -13,6 +13,7 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" +#include "stm/timing.h" #include "stm/misc.c" #include "stm/list.c" @@ -31,3 +32,4 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" +#include "stm/timing.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -54,6 +54,25 @@ object_t *ss; }; +enum stm_time_e { + STM_TIME_OUTSIDE_TRANSACTION, + STM_TIME_RUN_CURRENT, + STM_TIME_RUN_COMMITTED, + STM_TIME_RUN_ABORTED_WRITE_WRITE, + STM_TIME_RUN_ABORTED_WRITE_READ, + STM_TIME_RUN_ABORTED_INEVITABLE, + STM_TIME_RUN_ABORTED_OTHER, + STM_TIME_WAIT_FREE_SEGMENT, + STM_TIME_WAIT_WRITE_WRITE, + STM_TIME_WAIT_WRITE_READ, + STM_TIME_WAIT_INEVITABLE, + STM_TIME_BOOKKEEPING, + STM_TIME_MINOR_GC, + STM_TIME_MAJOR_GC, + STM_TIME_SYNC_PAUSE, + _STM_TIME_N +}; + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -66,6 +85,10 @@ /* after an abort, some details about the abort are stored there. (these fields are not modified on a successful commit) */ long last_abort__bytes_in_nursery; + /* timing information, accumulated */ + float timing[_STM_TIME_N]; + double _timing_cur_start; + enum stm_time_e _timing_cur_state; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -336,6 +359,10 @@ const char *msg); +/* Temporary? */ +void stm_flush_timing(stm_thread_local_t *); + + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -24,6 +24,7 @@ size_t mem_bytes_to_clear_on_abort; long last_abort__bytes_in_nursery; int associated_segment_num; + float timing[]; ...; } stm_thread_local_t; @@ -98,6 +99,24 @@ int stm_can_move(object_t *); void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); + +#define STM_TIME_OUTSIDE_TRANSACTION ... +#define STM_TIME_RUN_CURRENT ... +#define STM_TIME_RUN_COMMITTED ... +#define STM_TIME_RUN_ABORTED_WRITE_WRITE ... +#define STM_TIME_RUN_ABORTED_WRITE_READ ... +#define STM_TIME_RUN_ABORTED_INEVITABLE ... +#define STM_TIME_RUN_ABORTED_OTHER ... +#define STM_TIME_WAIT_FREE_SEGMENT ... +#define STM_TIME_WAIT_WRITE_WRITE ... +#define STM_TIME_WAIT_WRITE_READ ... +#define STM_TIME_WAIT_INEVITABLE ... +#define STM_TIME_BOOKKEEPING ... +#define STM_TIME_MINOR_GC ... +#define STM_TIME_MAJOR_GC ... +#define STM_TIME_SYNC_PAUSE ... 
+ +void stm_flush_timing(stm_thread_local_t *); """) @@ -261,6 +280,7 @@ undef_macros=['NDEBUG'], include_dirs=[parent_dir], extra_compile_args=['-g', '-O0', '-Werror', '-ferror-limit=1'], + extra_link_args=['-g', '-lrt'], force_generic_engine=True) diff --git a/c7/test/test_timing.py b/c7/test/test_timing.py new file mode 100644 --- /dev/null +++ b/c7/test/test_timing.py @@ -0,0 +1,37 @@ +from support import * +import py, time + + +class TestTiming(BaseTest): + + def gettimer(self, n): + tl = self.tls[self.current_thread] + lib.stm_flush_timing(tl) + return tl.timing[n] + + def expect_timer(self, n, expected_value): + real = self.gettimer(n) + print 'timer %d is %s, expecting %s' % (n, real, expected_value) + assert abs(real - expected_value) < 0.09 + + def test_time_outside_transaction(self): + time.sleep(0.2) + self.start_transaction() + self.commit_transaction() + self.expect_timer(lib.STM_TIME_OUTSIDE_TRANSACTION, 0.2) + + def test_time_run_current(self): + self.start_transaction() + time.sleep(0.1) + self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.1) + time.sleep(0.1) + self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.2) + self.commit_transaction() + self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.0) + + def test_time_run_committed(self): + self.start_transaction() + time.sleep(0.2) + self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.0) + self.commit_transaction() + self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.2) diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -3,34 +3,36 @@ C7HEADERS = ../c7/stmgc.h ../c7/stm/*.h +COMMON = -pthread -lrt -g -Wall -all: duhton_debug duhton + +all: duhton_debug duhton duhton: *.c *.h $(C7SOURCES) $(C7HEADERS) - clang -pthread -g -O2 -o duhton *.c ../c7/stmgc.c -Wall + clang $(COMMON) -O2 -o duhton *.c ../c7/stmgc.c duhton_release: *.c *.h $(C7SOURCES) $(C7HEADERS) - clang -pthread -g -DNDEBUG -O2 -o duhton_release *.c ../c7/stmgc.c -Wall + clang $(COMMON) -DNDEBUG -O2 -o duhton_release *.c ../c7/stmgc.c duhton_debug: *.c *.h $(C7SOURCES) $(C7HEADERS) - clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug *.c ../c7/stmgc.c -Wall + clang -DSTM_DEBUGPRINT $(COMMON) -DDu_DEBUG -o duhton_debug *.c ../c7/stmgc.c duhton_nostm: *.c *.h ../gil-c7/stmgc.? - clang -pthread -g -DNDEBUG -O2 -o duhton_nostm *.c ../gil-c7/stmgc.c -Wall -DUSE_GIL + clang $(COMMON) -DNDEBUG -O2 -o duhton_nostm *.c ../gil-c7/stmgc.c -DUSE_GIL duhton_debug_nostm: *.c *.h ../gil-c7/stmgc.? - clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug_nostm *.c ../gil-c7/stmgc.c -Wall -DUSE_GIL -ferror-limit=1 + clang -DSTM_DEBUGPRINT $(COMMON) -DDu_DEBUG -o duhton_debug_nostm *.c ../gil-c7/stmgc.c -DUSE_GIL -ferror-limit=1 duhton_htm: *.c *.h ../htm-c7/stmgc.? ../htm-c7/htm.h - clang -pthread -g -DNDEBUG -O2 -o duhton_htm *.c ../htm-c7/stmgc.c -Wall -DUSE_HTM + clang $(COMMON) -DNDEBUG -O2 -o duhton_htm *.c ../htm-c7/stmgc.c -DUSE_HTM duhton_debug_htm: *.c *.h ../htm-c7/stmgc.? 
../htm-c7/htm.h - clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug_htm *.c ../htm-c7/stmgc.c -Wall -DUSE_HTM -ferror-limit=1 + clang -DSTM_DEBUGPRINT $(COMMON) -DDu_DEBUG -o duhton_debug_htm *.c ../htm-c7/stmgc.c -DUSE_HTM -ferror-limit=1 clean: From noreply at buildbot.pypy.org Sun Mar 30 19:31:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 19:31:31 +0200 (CEST) Subject: [pypy-commit] stmgc default: Adapt demo2.c to display the timers at the end Message-ID: <20140330173131.BCCC51C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1120:47c979dca6e6 Date: 2014-03-30 19:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/47c979dca6e6/ Log: Adapt demo2.c to display the timers at the end diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -184,6 +184,18 @@ static sem_t done; +void unregister_thread_local(void) +{ + int i; + stm_flush_timing(&stm_thread_local); + for (i = 0; i < _STM_TIME_N; i++) { + fprintf(stderr, "timer %2d: %.6f\n", i, + (double)stm_thread_local.timing[i]); + } + + stm_unregister_thread_local(&stm_thread_local); +} + void *demo2(void *arg) { int status; @@ -198,7 +210,7 @@ STM_POP_ROOT(stm_thread_local, global_chained_list); assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); - stm_unregister_thread_local(&stm_thread_local); + unregister_thread_local(); status = sem_post(&done); assert(status == 0); return NULL; } @@ -255,7 +267,7 @@ final_check(); - stm_unregister_thread_local(&stm_thread_local); + unregister_thread_local(); stm_teardown(); return 0; diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -459,6 +459,9 @@ minor_collection(/*commit=*/ true); + /* the call to minor_collection() above leaves us with + STM_TIME_BOOKKEEPING */ + s_mutex_lock(); restart: diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -134,7 +134,7 @@ if (is_major_collection_requested()) { /* if still true */ - enum stm_time_e oldstate = change_timing_state(STM_TIME_MAJOR_GC); + int oldstate = change_timing_state(STM_TIME_MAJOR_GC); synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -182,6 +182,7 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. 
*/ + change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT); cond_wait(C_SEGMENT_FREE); /* Return false to the caller, which will call us again */ @@ -309,6 +310,10 @@ static void enter_safe_point_if_requested(void) { + if (STM_SEGMENT->nursery_end == NURSERY_END) + return; /* fast path: no safe point requested */ + + int previous_state = -1; assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { @@ -325,11 +330,18 @@ #ifdef STM_TESTS abort_with_mutex(); #endif + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_PAUSE); + } cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); STM_PSEGMENT->safe_point = SP_RUNNING; } + + if (previous_state != -1) { + change_timing_state(previous_state); + } } static void synchronize_all_threads(enum sync_type_e sync_type) diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -40,5 +40,5 @@ void stm_flush_timing(stm_thread_local_t *tl) { - change_timing_state_tl(tl, tl->_timing_cur_state); + TIMING_CHANGE(tl, tl->_timing_cur_state); } From noreply at buildbot.pypy.org Sun Mar 30 19:58:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 19:58:49 +0200 (CEST) Subject: [pypy-commit] stmgc default: Finish the hopefully-correct attribution of times in contention. More tests. Message-ID: <20140330175849.0C8C31C0034@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1121:f7a1890045b3 Date: 2014-03-30 19:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/f7a1890045b3/ Log: Finish the hopefully-correct attribution of times in contention. More tests. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -122,8 +122,8 @@ #endif /* Fix the choices that are found incorrect due to TS_INEVITABLE - or NSE_SIGABORT */ - if (contmgr.other_pseg->pub.nursery_end == NSE_SIGABORT) { + or is_abort() */ + if (is_abort(contmgr.other_pseg->pub.nursery_end)) { contmgr.abort_other = true; contmgr.try_sleep = false; } @@ -136,6 +136,19 @@ contmgr.abort_other = false; } + + int wait_category = + kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ : + kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE : + STM_TIME_WAIT_OTHER; + + int abort_category = + kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE : + kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ : + kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE : + STM_TIME_RUN_ABORTED_OTHER; + + if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { /* Sleep. @@ -149,6 +162,8 @@ */ contmgr.other_pseg->signal_when_done = true; + change_timing_state(wait_category); + /* XXX should also tell other_pseg "please commit soon" */ dprintf(("pausing...\n")); @@ -160,15 +175,20 @@ if (must_abort()) abort_with_mutex(); + + change_timing_state(STM_TIME_RUN_CURRENT); } + else if (!contmgr.abort_other) { dprintf(("abort in contention\n")); + STM_SEGMENT->nursery_end = abort_category; abort_with_mutex(); } + else { /* We have to signal the other thread to abort, and wait until it does. 
*/ - contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; + contmgr.other_pseg->pub.nursery_end = abort_category; int sp = contmgr.other_pseg->safe_point; switch (sp) { diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -3,7 +3,11 @@ static void write_read_contention_management(uint8_t other_segment_num); static void inevitable_contention_management(uint8_t other_segment_num); +static inline bool is_abort(uintptr_t nursery_end) { + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); +} + static inline bool is_aborting_now(uint8_t other_segment_num) { - return (get_segment(other_segment_num)->nursery_end == NSE_SIGABORT && + return (is_abort(get_segment(other_segment_num)->nursery_end) && get_priv_segment(other_segment_num)->safe_point != SP_RUNNING); } diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -435,7 +435,7 @@ list_clear(STM_PSEGMENT->modified_old_objects); } -static void _finish_transaction(enum stm_time_e attribute_to) +static void _finish_transaction(int attribute_to) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; @@ -636,13 +636,16 @@ /* invoke the callbacks */ invoke_and_clear_callbacks_on_abort(); - if (STM_SEGMENT->nursery_end == NSE_SIGABORT) { + int attribute_to = STM_TIME_RUN_ABORTED_OTHER; + + if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ + attribute_to = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(STM_TIME_RUN_ABORTED_OTHER); + _finish_transaction(attribute_to); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ /* Broadcast C_ABORTED to wake up contention.c */ diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,10 +1,6 @@ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ -#define NSE_SIGPAUSE 0 -#define NSE_SIGABORT 1 -#if NSE_SIGABORT > _STM_NSE_SIGNAL_MAX -# error "update _STM_NSE_SIGNAL_MAX" -#endif +#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER static uint32_t highest_overflow_number; @@ -14,9 +10,7 @@ static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); static void major_do_minor_collections(void); -static inline bool must_abort(void) { - return STM_SEGMENT->nursery_end == NSE_SIGABORT; -} +#define must_abort() is_abort(STM_SEGMENT->nursery_end) static void assert_memset_zero(void *s, size_t n); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -63,9 +63,9 @@ STM_TIME_RUN_ABORTED_INEVITABLE, STM_TIME_RUN_ABORTED_OTHER, STM_TIME_WAIT_FREE_SEGMENT, - STM_TIME_WAIT_WRITE_WRITE, STM_TIME_WAIT_WRITE_READ, STM_TIME_WAIT_INEVITABLE, + STM_TIME_WAIT_OTHER, STM_TIME_BOOKKEEPING, STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, @@ -136,7 +136,7 @@ #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 -#define _STM_NSE_SIGNAL_MAX 1 +#define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -108,9 +108,9 @@ #define STM_TIME_RUN_ABORTED_INEVITABLE ... #define STM_TIME_RUN_ABORTED_OTHER ... #define STM_TIME_WAIT_FREE_SEGMENT ... -#define STM_TIME_WAIT_WRITE_WRITE ... #define STM_TIME_WAIT_WRITE_READ ... #define STM_TIME_WAIT_INEVITABLE ... +#define STM_TIME_WAIT_OTHER ... #define STM_TIME_BOOKKEEPING ... #define STM_TIME_MINOR_GC ... #define STM_TIME_MAJOR_GC ... 
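As a rough illustration of how the per-category counters above might be consumed (a sketch only, not part of the changeset; it assumes the float timing[] array in stm_thread_local_t and the single-argument stm_flush_timing() added earlier on this branch):

    #include <stdio.h>
    #include "stmgc.h"

    /* Sum the time attributed to aborted transactions for one thread,
       using the STM_TIME_RUN_ABORTED_* categories filled in by the
       contention code above. */
    static double time_lost_in_aborts(stm_thread_local_t *tl)
    {
        stm_flush_timing(tl);   /* fold the currently-running interval into timing[] */
        return (double)tl->timing[STM_TIME_RUN_ABORTED_WRITE_WRITE]
             + (double)tl->timing[STM_TIME_RUN_ABORTED_WRITE_READ]
             + (double)tl->timing[STM_TIME_RUN_ABORTED_INEVITABLE]
             + (double)tl->timing[STM_TIME_RUN_ABORTED_OTHER];
    }
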
diff --git a/c7/test/test_timing.py b/c7/test/test_timing.py --- a/c7/test/test_timing.py +++ b/c7/test/test_timing.py @@ -12,7 +12,12 @@ def expect_timer(self, n, expected_value): real = self.gettimer(n) print 'timer %d is %s, expecting %s' % (n, real, expected_value) - assert abs(real - expected_value) < 0.09 + if expected_value == 0.0: + assert real == 0.0 + elif expected_value == "nonzero": + assert real > 0.0 + else: + assert abs(real - expected_value) < 0.09 def test_time_outside_transaction(self): time.sleep(0.2) @@ -35,3 +40,55 @@ self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.0) self.commit_transaction() self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.2) + + def test_time_run_aborted_write_write(self): + o = stm_allocate_old(16) + self.start_transaction() + stm_write(o) + # + self.switch(1) + self.start_transaction() + time.sleep(0.2) + py.test.raises(Conflict, stm_write, o) + self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_WRITE, 0.2) + + def test_time_run_aborted_write_read(self): + o = stm_allocate_old(16) + self.start_transaction() + stm_read(o) + # + self.switch(1) + self.start_transaction() + time.sleep(0.2) + stm_write(o) + py.test.raises(Conflict, self.commit_transaction) + self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_READ, 0.2) + + def test_time_run_aborted_inevitable(self): + self.start_transaction() + self.become_inevitable() + # + self.switch(1) + self.start_transaction() + time.sleep(0.2) + py.test.raises(Conflict, self.become_inevitable) + self.expect_timer(lib.STM_TIME_RUN_ABORTED_INEVITABLE, 0.2) + + def test_time_run_aborted_other(self): + self.start_transaction() + time.sleep(0.2) + self.abort_transaction() + self.expect_timer(lib.STM_TIME_RUN_ABORTED_OTHER, 0.2) + + def test_time_minor_gc(self): + self.start_transaction() + self.expect_timer(lib.STM_TIME_MINOR_GC, 0.0) + stm_minor_collect() + self.expect_timer(lib.STM_TIME_MINOR_GC, "nonzero") + self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0) + + def test_time_major_gc(self): + self.start_transaction() + self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0) + stm_major_collect() + self.expect_timer(lib.STM_TIME_MAJOR_GC, "nonzero") From noreply at buildbot.pypy.org Sun Mar 30 21:13:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 21:13:56 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add a 'verbose' argument to stm_flush_timing() Message-ID: <20140330191356.902011C0034@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1122:976a1d42a508 Date: 2014-03-30 21:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/976a1d42a508/ Log: Add a 'verbose' argument to stm_flush_timing() diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -186,13 +186,7 @@ void unregister_thread_local(void) { - int i; - stm_flush_timing(&stm_thread_local); - for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, "timer %2d: %.6f\n", i, - (double)stm_thread_local.timing[i]); - } - + stm_flush_timing(&stm_thread_local, 1); stm_unregister_thread_local(&stm_thread_local); } diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -38,7 +38,37 @@ tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; } -void stm_flush_timing(stm_thread_local_t *tl) +static const char *timer_names[] = { + "outside transaction", + "run current", + "run committed", + "run aborted write write", + "run aborted write read", + "run aborted inevitable", + "run aborted other", + "wait free segment", + "wait write read", + "wait inevitable", + "wait other", + 
"bookkeeping", + "minor gc", + "major gc", + "sync pause", +}; + +void stm_flush_timing(stm_thread_local_t *tl, int verbose) { TIMING_CHANGE(tl, tl->_timing_cur_state); + + assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); + if (verbose > 0) { + int i; + s_mutex_lock(); + fprintf(stderr, "thread %p:\n", tl); + for (i = 0; i < _STM_TIME_N; i++) { + fprintf(stderr, " %-24s %.3f s\n", + timer_names[i], (double)tl->timing[i]); + } + s_mutex_unlock(); + } } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -360,7 +360,7 @@ /* Temporary? */ -void stm_flush_timing(stm_thread_local_t *); +void stm_flush_timing(stm_thread_local_t *tl, int verbose); /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -116,7 +116,7 @@ #define STM_TIME_MAJOR_GC ... #define STM_TIME_SYNC_PAUSE ... -void stm_flush_timing(stm_thread_local_t *); +void stm_flush_timing(stm_thread_local_t *, int); """) diff --git a/c7/test/test_timing.py b/c7/test/test_timing.py --- a/c7/test/test_timing.py +++ b/c7/test/test_timing.py @@ -6,7 +6,7 @@ def gettimer(self, n): tl = self.tls[self.current_thread] - lib.stm_flush_timing(tl) + lib.stm_flush_timing(tl, 1) return tl.timing[n] def expect_timer(self, n, expected_value): diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -186,6 +186,7 @@ } + stm_flush_timing(&stm_thread_local, 1); stm_unregister_thread_local(&stm_thread_local); return NULL; From noreply at buildbot.pypy.org Sun Mar 30 21:30:36 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 21:30:36 +0200 (CEST) Subject: [pypy-commit] pypy default: revert ccdd17cd5300, it causes more problems than it solves Message-ID: <20140330193036.3D97D1C1154@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70336:6b35cdb1c211 Date: 2014-03-30 22:29 +0300 http://bitbucket.org/pypy/pypy/changeset/6b35cdb1c211/ Log: revert ccdd17cd5300, it causes more problems than it solves diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -64,8 +64,6 @@ kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] - # tests are not strictly ansi C compliant, compile as C++ - kwds["compile_extra"].append("/TP") # prevent linking with PythonXX.lib w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] kwds["link_extra"] = ["/NODEFAULTLIB:Python%d%d.lib" % From noreply at buildbot.pypy.org Sun Mar 30 21:31:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 21:31:51 +0200 (CEST) Subject: [pypy-commit] benchmarks default: Shut down the threads explicitly at the end (for now, needed to get timing reports) Message-ID: <20140330193151.0BBFC1C14E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r243:79769fa4155a Date: 2014-03-30 21:25 +0200 http://bitbucket.org/pypy/benchmarks/changeset/79769fa4155a/ Log: Shut down the threads explicitly at the end (for now, needed to get timing reports) diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -1,6 +1,6 @@ from Queue import Queue, Empty, Full from threading import Thread, Condition, Lock 
-import thread +import thread, atexit, sys try: from __pypy__.thread import atomic, getsegmentlimit @@ -30,14 +30,21 @@ class ThreadPool(object): def __init__(self): self.input_queue = Queue() - for n in range(getsegmentlimit()): - Worker(self.input_queue) + n_workers = getsegmentlimit() + self.workers = [Worker(self.input_queue) for i in range(n_workers)] def add_task(self, func, *args, **kwds): self.input_queue.put((func, args, kwds)) + def shutdown(self): + for w in self.workers: + self.input_queue.put((sys.exit, (), {})) + for w in self.workers: + w.join() + + _thread_pool = ThreadPool() - +atexit.register(_thread_pool.shutdown) From noreply at buildbot.pypy.org Sun Mar 30 21:32:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 21:32:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/976a1d42a508 Message-ID: <20140330193225.DA6621C14E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70337:720e8ae12d2f Date: 2014-03-30 21:26 +0200 http://bitbucket.org/pypy/pypy/changeset/720e8ae12d2f/ Log: import stmgc/976a1d42a508 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -4d330c8e6b92 +976a1d42a508 diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -123,8 +123,8 @@ #endif /* Fix the choices that are found incorrect due to TS_INEVITABLE - or NSE_SIGABORT */ - if (contmgr.other_pseg->pub.nursery_end == NSE_SIGABORT) { + or is_abort() */ + if (is_abort(contmgr.other_pseg->pub.nursery_end)) { contmgr.abort_other = true; contmgr.try_sleep = false; } @@ -137,6 +137,19 @@ contmgr.abort_other = false; } + + int wait_category = + kind == WRITE_READ_CONTENTION ? STM_TIME_WAIT_WRITE_READ : + kind == INEVITABLE_CONTENTION ? STM_TIME_WAIT_INEVITABLE : + STM_TIME_WAIT_OTHER; + + int abort_category = + kind == WRITE_WRITE_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_WRITE : + kind == WRITE_READ_CONTENTION ? STM_TIME_RUN_ABORTED_WRITE_READ : + kind == INEVITABLE_CONTENTION ? STM_TIME_RUN_ABORTED_INEVITABLE : + STM_TIME_RUN_ABORTED_OTHER; + + if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { /* Sleep. @@ -150,6 +163,10 @@ */ contmgr.other_pseg->signal_when_done = true; + change_timing_state(wait_category); + + /* XXX should also tell other_pseg "please commit soon" */ + dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_TRANSACTION_DONE; @@ -159,15 +176,20 @@ if (must_abort()) abort_with_mutex(); + + change_timing_state(STM_TIME_RUN_CURRENT); } + else if (!contmgr.abort_other) { dprintf(("abort in contention\n")); + STM_SEGMENT->nursery_end = abort_category; abort_with_mutex(); } + else { /* We have to signal the other thread to abort, and wait until it does. 
*/ - contmgr.other_pseg->pub.nursery_end = NSE_SIGABORT; + contmgr.other_pseg->pub.nursery_end = abort_category; int sp = contmgr.other_pseg->safe_point; switch (sp) { diff --git a/rpython/translator/stm/src_stm/stm/contention.h b/rpython/translator/stm/src_stm/stm/contention.h --- a/rpython/translator/stm/src_stm/stm/contention.h +++ b/rpython/translator/stm/src_stm/stm/contention.h @@ -4,7 +4,11 @@ static void write_read_contention_management(uint8_t other_segment_num); static void inevitable_contention_management(uint8_t other_segment_num); +static inline bool is_abort(uintptr_t nursery_end) { + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); +} + static inline bool is_aborting_now(uint8_t other_segment_num) { - return (get_segment(other_segment_num)->nursery_end == NSE_SIGABORT && + return (is_abort(get_segment(other_segment_num)->nursery_end) && get_priv_segment(other_segment_num)->safe_point != SP_RUNNING); } diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -173,7 +173,7 @@ retry: if (jmpbuf == NULL) { - wait_for_end_of_inevitable_transaction(false); + wait_for_end_of_inevitable_transaction(tl); } if (!acquire_thread_segment(tl)) @@ -182,6 +182,8 @@ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); + change_timing_state(STM_TIME_RUN_CURRENT); + STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR : TS_INEVITABLE); @@ -434,7 +436,7 @@ list_clear(STM_PSEGMENT->modified_old_objects); } -static void _finish_transaction(void) +static void _finish_transaction(int attribute_to) { STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; @@ -443,6 +445,8 @@ LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->large_overflow_objects); + timing_end_transaction(attribute_to); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ @@ -456,6 +460,9 @@ minor_collection(/*commit=*/ true); + /* the call to minor_collection() above leaves us with + STM_TIME_BOOKKEEPING */ + s_mutex_lock(); restart: @@ -506,7 +513,7 @@ } /* done */ - _finish_transaction(); + _finish_transaction(STM_TIME_RUN_COMMITTED); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ s_mutex_unlock(); @@ -630,13 +637,16 @@ /* invoke the callbacks */ invoke_and_clear_callbacks_on_abort(); - if (STM_SEGMENT->nursery_end == NSE_SIGABORT) { + int attribute_to = STM_TIME_RUN_ABORTED_OTHER; + + if (is_abort(STM_SEGMENT->nursery_end)) { /* done aborting */ + attribute_to = STM_SEGMENT->nursery_end; STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE : NURSERY_END; } - _finish_transaction(); + _finish_transaction(attribute_to); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! 
*/ /* Broadcast C_ABORTED to wake up contention.c */ @@ -669,7 +679,7 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); - wait_for_end_of_inevitable_transaction(true); + wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; clear_callbacks_on_abort(); diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -29,6 +29,8 @@ #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define OLD_RM_START ((END_NURSERY_PAGE * 4096UL) >> 4) +#define FIRST_OLD_RM_PAGE (OLD_RM_START / 4096UL) #define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) @@ -120,7 +122,7 @@ /* Start time: to know approximately for how long a transaction has been running, in contention management */ - uint64_t start_time; + double start_time; /* This is the number stored in the overflowed objects (a multiple of GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -135,11 +135,15 @@ if (is_major_collection_requested()) { /* if still true */ + int oldstate = change_timing_state(STM_TIME_MAJOR_GC); + synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK); if (is_major_collection_requested()) { /* if *still* true */ major_collection_now_at_safe_point(); } + + change_timing_state(oldstate); } s_mutex_unlock(); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -319,7 +319,11 @@ stm_safe_point(); + change_timing_state(STM_TIME_MINOR_GC); + _do_minor_collection(commit); + + change_timing_state(commit ? STM_TIME_BOOKKEEPING : STM_TIME_RUN_CURRENT); } void stm_collect(long level) diff --git a/rpython/translator/stm/src_stm/stm/nursery.h b/rpython/translator/stm/src_stm/stm/nursery.h --- a/rpython/translator/stm/src_stm/stm/nursery.h +++ b/rpython/translator/stm/src_stm/stm/nursery.h @@ -1,11 +1,7 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ -#define NSE_SIGPAUSE 0 -#define NSE_SIGABORT 1 -#if NSE_SIGABORT > _STM_NSE_SIGNAL_MAX -# error "update _STM_NSE_SIGNAL_MAX" -#endif +#define NSE_SIGPAUSE STM_TIME_WAIT_OTHER static uint32_t highest_overflow_number; @@ -15,9 +11,7 @@ static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); static void major_do_minor_collections(void); -static inline bool must_abort(void) { - return STM_SEGMENT->nursery_end == NSE_SIGABORT; -} +#define must_abort() is_abort(STM_SEGMENT->nursery_end) static void assert_memset_zero(void *s, size_t n); diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -170,25 +170,24 @@ increment_total_allocated(total); } +static void pages_setup_readmarkers_for_nursery(void) +{ + /* The nursery page's read markers are never read, but must still + be writeable. 
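The hunks above (contention.c, core.c, nursery.h) replace the single NSE_SIGABORT value by storing the timing category to blame directly in the per-segment 'nursery_end' word: any value at or below _STM_NSE_SIGNAL_MAX, other than NSE_SIGPAUSE, now means "abort requested, and charge the lost time to this category". A standalone sketch of that encoding follows; the enum is abridged and the NURSERY_END value is a made-up placeholder, while is_abort() and the two #defines mirror the diff.

#include <stdint.h>
#include <stdio.h>

/* abridged stand-in for the stm_time_e enum added to stmgc.h */
enum stm_time_e { STM_TIME_RUN_ABORTED_WRITE_WRITE,
                  STM_TIME_RUN_ABORTED_WRITE_READ,
                  STM_TIME_WAIT_OTHER,
                  _STM_TIME_N };

#define _STM_NSE_SIGNAL_MAX  _STM_TIME_N      /* every category fits below it */
#define NSE_SIGPAUSE         STM_TIME_WAIT_OTHER
#define NURSERY_END          (4096UL * 1000)  /* placeholder "no signal" value */

/* same test as the is_abort() introduced in contention.h */
static int is_abort(uintptr_t nursery_end)
{
    return nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE;
}

int main(void)
{
    uintptr_t ne = NURSERY_END;               /* normal running transaction */
    printf("abort? %d\n", is_abort(ne));      /* -> 0 */

    ne = NSE_SIGPAUSE;                        /* safe-point pause requested */
    printf("abort? %d\n", is_abort(ne));      /* -> 0 */

    ne = STM_TIME_RUN_ABORTED_WRITE_READ;         /* abort requested, and the  */
    printf("abort? %d, charge category %lu\n",    /* value itself names the    */
           is_abort(ne), (unsigned long)ne);      /* category to charge        */
    return 0;
}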
We'd like to map the pages to a general "trash + page"; missing one, we remap all the pages over to the same one. + We still keep one page *per segment* to avoid cross-CPU cache + conflicts. -#if 0 -static bool is_fully_in_shared_pages(object_t *obj) -{ - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + (XXX no performance difference measured so far) + */ + long i, j; + for (i = 1; i <= NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); - if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) - return (flag_page_private[first_page] == SHARED_PAGE); - - ssize_t obj_size = stmcb_size_rounded_up( - (struct object_s *)REAL_ADDRESS(stm_object_pages, obj)); - - uintptr_t last_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; - - do { - if (flag_page_private[first_page++] != SHARED_PAGE) - return false; - } while (first_page <= last_page); - - return true; + for (j = FIRST_READMARKER_PAGE + 1; j < FIRST_OLD_RM_PAGE; j++) { + remap_file_pages(segment_base + 4096 * j, 4096, 0, + i * NB_PAGES + FIRST_READMARKER_PAGE, 0); + /* errors here ignored */ + } + } } -#endif diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -40,6 +40,7 @@ static void page_privatize(uintptr_t pagenum); static void page_reshare(uintptr_t pagenum); static void _page_do_reshare(long segnum, uintptr_t pagenum); +static void pages_setup_readmarkers_for_nursery(void); /* Note: don't ever do "mutex_pages_lock(); mutex_lock()" in that order */ static void mutex_pages_lock(void); diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -37,6 +37,7 @@ (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); } + pages_setup_readmarkers_for_nursery(); } void stm_setup(void) @@ -169,6 +170,8 @@ num = tl->prev->associated_segment_num; } tl->thread_local_obj = NULL; + tl->_timing_cur_state = STM_TIME_OUTSIDE_TRANSACTION; + tl->_timing_cur_start = get_stm_time(); /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -32,7 +32,6 @@ pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ uint8_t in_use1[NB_SEGMENTS]; /* 1 if running a pthread */ - uint64_t global_time; }; char reserved[192]; } sync_ctl __attribute__((aligned(64))); @@ -121,13 +120,14 @@ /************************************************************/ -static void wait_for_end_of_inevitable_transaction(bool can_abort) +static void wait_for_end_of_inevitable_transaction( + stm_thread_local_t *tl_or_null_if_can_abort) { long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { - if (can_abort) { + if (tl_or_null_if_can_abort == NULL) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), or wait for a while. 
If we go past this call, then we @@ -138,7 +138,11 @@ else { /* wait for stm_commit_transaction() to finish this inevitable transaction */ + change_timing_state_tl(tl_or_null_if_can_abort, + STM_TIME_WAIT_INEVITABLE); cond_wait(C_INEVITABLE); + /* don't bother changing the timing state again: the caller + will very soon go to STM_TIME_RUN_CURRENT */ } goto restart; } @@ -179,6 +183,7 @@ } /* No segment available. Wait until release_thread_segment() signals that one segment has been freed. */ + change_timing_state_tl(tl, STM_TIME_WAIT_FREE_SEGMENT); cond_wait(C_SEGMENT_FREE); /* Return false to the caller, which will call us again */ @@ -189,7 +194,6 @@ assert(STM_SEGMENT->segment_num == num); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; - STM_PSEGMENT->start_time = ++sync_ctl.global_time; return true; } @@ -307,6 +311,10 @@ static void enter_safe_point_if_requested(void) { + if (STM_SEGMENT->nursery_end == NURSERY_END) + return; /* fast path: no safe point requested */ + + int previous_state = -1; assert(_seems_to_be_running_transaction()); assert(_has_mutex()); while (1) { @@ -323,11 +331,18 @@ #ifdef STM_TESTS abort_with_mutex(); #endif + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_PAUSE); + } cond_signal(C_AT_SAFE_POINT); STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_REQUEST_REMOVED; cond_wait(C_REQUEST_REMOVED); STM_PSEGMENT->safe_point = SP_RUNNING; } + + if (previous_state != -1) { + change_timing_state(previous_state); + } } static void synchronize_all_threads(enum sync_type_e sync_type) diff --git a/rpython/translator/stm/src_stm/stm/sync.h b/rpython/translator/stm/src_stm/stm/sync.h --- a/rpython/translator/stm/src_stm/stm/sync.h +++ b/rpython/translator/stm/src_stm/stm/sync.h @@ -29,7 +29,7 @@ static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); -static void wait_for_end_of_inevitable_transaction(bool can_abort); +static void wait_for_end_of_inevitable_transaction(stm_thread_local_t *); enum sync_type_e { STOP_OTHERS_UNTIL_MUTEX_UNLOCK, diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -0,0 +1,75 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static inline void add_timing(stm_thread_local_t *tl, enum stm_time_e category, + double elapsed) +{ + tl->timing[category] += elapsed; +} + +#define TIMING_CHANGE(tl, newstate) \ + double curtime = get_stm_time(); \ + double elasped = curtime - tl->_timing_cur_start; \ + enum stm_time_e oldstate = tl->_timing_cur_state; \ + add_timing(tl, oldstate, elasped); \ + tl->_timing_cur_state = newstate; \ + tl->_timing_cur_start = curtime + +static enum stm_time_e change_timing_state(enum stm_time_e newstate) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + TIMING_CHANGE(tl, newstate); + return oldstate; +} + +static void change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) +{ + TIMING_CHANGE(tl, newstate); +} + +static void timing_end_transaction(enum stm_time_e attribute_to) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); + add_timing(tl, attribute_to, tl->timing[STM_TIME_RUN_CURRENT]); + tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; +} + +static const char *timer_names[] = { + "outside transaction", 
+ "run current", + "run committed", + "run aborted write write", + "run aborted write read", + "run aborted inevitable", + "run aborted other", + "wait free segment", + "wait write read", + "wait inevitable", + "wait other", + "bookkeeping", + "minor gc", + "major gc", + "sync pause", +}; + +void stm_flush_timing(stm_thread_local_t *tl, int verbose) +{ + TIMING_CHANGE(tl, tl->_timing_cur_state); + + assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); + if (verbose > 0) { + int i; + s_mutex_lock(); + fprintf(stderr, "thread %p:\n", tl); + for (i = 0; i < _STM_TIME_N; i++) { + fprintf(stderr, " %-24s %.3f s\n", + timer_names[i], (double)tl->timing[i]); + } + s_mutex_unlock(); + } +} diff --git a/rpython/translator/stm/src_stm/stm/timing.h b/rpython/translator/stm/src_stm/stm/timing.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/timing.h @@ -0,0 +1,15 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#include + +static inline double get_stm_time(void) +{ + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + return tp.tv_sec + tp.tv_nsec * 0.000000001; +} + +static enum stm_time_e change_timing_state(enum stm_time_e newstate); +static void change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); + +static void timing_end_transaction(enum stm_time_e attribute_to); diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -14,6 +14,7 @@ #include "stm/extra.h" #include "stm/fprintcolor.h" #include "stm/weakref.h" +#include "stm/timing.h" #include "stm/misc.c" #include "stm/list.c" @@ -32,3 +33,4 @@ #include "stm/extra.c" #include "stm/fprintcolor.c" #include "stm/weakref.c" +#include "stm/timing.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -55,6 +55,25 @@ object_t *ss; }; +enum stm_time_e { + STM_TIME_OUTSIDE_TRANSACTION, + STM_TIME_RUN_CURRENT, + STM_TIME_RUN_COMMITTED, + STM_TIME_RUN_ABORTED_WRITE_WRITE, + STM_TIME_RUN_ABORTED_WRITE_READ, + STM_TIME_RUN_ABORTED_INEVITABLE, + STM_TIME_RUN_ABORTED_OTHER, + STM_TIME_WAIT_FREE_SEGMENT, + STM_TIME_WAIT_WRITE_READ, + STM_TIME_WAIT_INEVITABLE, + STM_TIME_WAIT_OTHER, + STM_TIME_BOOKKEEPING, + STM_TIME_MINOR_GC, + STM_TIME_MAJOR_GC, + STM_TIME_SYNC_PAUSE, + _STM_TIME_N +}; + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -67,6 +86,10 @@ /* after an abort, some details about the abort are stored there. (these fields are not modified on a successful commit) */ long last_abort__bytes_in_nursery; + /* timing information, accumulated */ + float timing[_STM_TIME_N]; + double _timing_cur_start; + enum stm_time_e _timing_cur_state; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -114,7 +137,7 @@ #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 -#define _STM_NSE_SIGNAL_MAX 1 +#define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) @@ -337,6 +360,10 @@ const char *msg); +/* Temporary? 
*/ +void stm_flush_timing(stm_thread_local_t *tl, int verbose); + + /* ==================== END ==================== */ #endif From noreply at buildbot.pypy.org Sun Mar 30 21:32:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 21:32:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: For now, explicitly call stm_flush_timing(verbose=1). Message-ID: <20140330193227.305501C14E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70338:17cf4e920972 Date: 2014-03-30 21:31 +0200 http://bitbucket.org/pypy/pypy/changeset/17cf4e920972/ Log: For now, explicitly call stm_flush_timing(verbose=1). diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -914,6 +914,12 @@ stm_thread_local.mem_clear_on_abort = (char *)&pypy_g_ExcData; stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData); } + +void pypy_stm_unregister_thread_local(void) +{ + stm_flush_timing(&stm_thread_local, 1); // XXX temporary + stm_unregister_thread_local(&stm_thread_local); +} ''' def commondefs(defines): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -47,7 +47,7 @@ return 'pypy_stm_register_thread_local();' def stm_unregister_thread_local(funcgen, op): - return 'stm_unregister_thread_local(&stm_thread_local);' + return 'pypy_stm_unregister_thread_local();' def stm_read(funcgen, op): assert isinstance(op.args[0].concretetype, lltype.Ptr) diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -83,7 +83,7 @@ pypy_stm_ready_atomic = 1; stm_commit_transaction(); pypy_stm_ready_atomic = 0; - stm_unregister_thread_local(&stm_thread_local); + pypy_stm_unregister_thread_local(); errno = e; } else { diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -16,6 +16,7 @@ void pypy_stm_setup(void); void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ void pypy_stm_register_thread_local(void); /* generated into stm_prebuilt.c */ +void pypy_stm_unregister_thread_local(void); /* generated into stm_prebuilt.c */ static inline void pypy_stm_commit_if_not_atomic(void) { int e = errno; From noreply at buildbot.pypy.org Sun Mar 30 22:13:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 30 Mar 2014 22:13:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: Record the number of times events trigger, in addition to the total time Message-ID: <20140330201347.AF3C51C0034@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1123:43f1137bc72e Date: 2014-03-30 22:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/43f1137bc72e/ Log: Record the number of times events trigger, in addition to the total time diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -7,6 +7,7 @@ double elapsed) { tl->timing[category] += elapsed; + tl->events[category] += 1; } #define TIMING_CHANGE(tl, newstate) \ @@ -58,7 +59,10 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose) { - TIMING_CHANGE(tl, tl->_timing_cur_state); + enum stm_time_e category = tl->_timing_cur_state; + uint64_t oldevents = tl->events[category]; + 
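All of the timing patches in this series follow one pattern: each state change charges the elapsed CLOCK_MONOTONIC time (plus, after the change just above, an event count) to the outgoing state, and finishing a transaction re-attributes whatever accumulated under "run current" to a final category such as "run committed". Below is a standalone sketch of that pattern, with invented names (timed_t, change_state, end_transaction) and only four states; it is an illustration, not the stmgc code itself.

#include <stdio.h>
#include <time.h>

enum { ST_OUTSIDE, ST_RUN_CURRENT, ST_RUN_COMMITTED, ST_MINOR_GC, _ST_N };

typedef struct {
    double timing[_ST_N];        /* seconds accumulated per state */
    unsigned events[_ST_N];      /* how many times each state was charged */
    double cur_start;
    int cur_state;
} timed_t;

static double now(void)          /* same idea as get_stm_time() above */
{
    struct timespec tp;
    clock_gettime(CLOCK_MONOTONIC, &tp);
    return tp.tv_sec + tp.tv_nsec * 1e-9;
}

static void add_timing(timed_t *t, int category, double elapsed)
{
    t->timing[category] += elapsed;
    t->events[category] += 1;
}

static int change_state(timed_t *t, int newstate)
{
    double c = now();
    int old = t->cur_state;
    add_timing(t, old, c - t->cur_start);   /* charge the *outgoing* state */
    t->cur_state = newstate;
    t->cur_start = c;
    return old;                             /* so callers can restore it later */
}

static void end_transaction(timed_t *t, int attribute_to)
{
    change_state(t, ST_OUTSIDE);
    add_timing(t, attribute_to, t->timing[ST_RUN_CURRENT]);  /* re-attribute */
    t->timing[ST_RUN_CURRENT] = 0.0;
}

int main(void)
{
    timed_t t = {{0.0}, {0}, 0.0, ST_OUTSIDE};
    t.cur_start = now();

    change_state(&t, ST_RUN_CURRENT);            /* ~ stm_start_transaction() */
    int prev = change_state(&t, ST_MINOR_GC);    /* ~ around a minor collection */
    change_state(&t, prev);
    end_transaction(&t, ST_RUN_COMMITTED);       /* ~ stm_commit_transaction() */

    printf("committed: %u event(s), %.6f s; minor gc: %u event(s), %.6f s\n",
           t.events[ST_RUN_COMMITTED], t.timing[ST_RUN_COMMITTED],
           t.events[ST_MINOR_GC], t.timing[ST_MINOR_GC]);
    return 0;
}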
TIMING_CHANGE(tl, category); + tl->events[category] = oldevents; assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); if (verbose > 0) { @@ -66,8 +70,8 @@ s_mutex_lock(); fprintf(stderr, "thread %p:\n", tl); for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %.3f s\n", - timer_names[i], (double)tl->timing[i]); + fprintf(stderr, " %-24s %9u %.3f s\n", + timer_names[i], tl->events[i], (double)tl->timing[i]); } s_mutex_unlock(); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -86,6 +86,7 @@ (these fields are not modified on a successful commit) */ long last_abort__bytes_in_nursery; /* timing information, accumulated */ + uint32_t events[_STM_TIME_N]; float timing[_STM_TIME_N]; double _timing_cur_start; enum stm_time_e _timing_cur_state; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -24,6 +24,7 @@ size_t mem_bytes_to_clear_on_abort; long last_abort__bytes_in_nursery; int associated_segment_num; + uint32_t events[]; float timing[]; ...; } stm_thread_local_t; diff --git a/c7/test/test_timing.py b/c7/test/test_timing.py --- a/c7/test/test_timing.py +++ b/c7/test/test_timing.py @@ -7,17 +7,20 @@ def gettimer(self, n): tl = self.tls[self.current_thread] lib.stm_flush_timing(tl, 1) - return tl.timing[n] + return tl.events[n], tl.timing[n] - def expect_timer(self, n, expected_value): - real = self.gettimer(n) - print 'timer %d is %s, expecting %s' % (n, real, expected_value) - if expected_value == 0.0: + def expect_timer(self, n, expected_time, expected_count='?'): + count, real = self.gettimer(n) + print 'timer %d is %d;%s, expecting %s;%s' % (n, count, real, + expected_count, expected_time) + if expected_time == 0.0: assert real == 0.0 - elif expected_value == "nonzero": + elif expected_time == "nonzero": assert real > 0.0 else: - assert abs(real - expected_value) < 0.09 + assert abs(real - expected_time) < 0.09 + if expected_count != '?': + assert count == expected_count def test_time_outside_transaction(self): time.sleep(0.2) @@ -28,18 +31,18 @@ def test_time_run_current(self): self.start_transaction() time.sleep(0.1) - self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.1) + self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.1, 0) time.sleep(0.1) - self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.2) + self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.2, 0) self.commit_transaction() - self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.0) + self.expect_timer(lib.STM_TIME_RUN_CURRENT, 0.0, 1) def test_time_run_committed(self): self.start_transaction() time.sleep(0.2) - self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.0) + self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.0, 0) self.commit_transaction() - self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.2) + self.expect_timer(lib.STM_TIME_RUN_COMMITTED, 0.2, 1) def test_time_run_aborted_write_write(self): o = stm_allocate_old(16) @@ -50,7 +53,7 @@ self.start_transaction() time.sleep(0.2) py.test.raises(Conflict, stm_write, o) - self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_WRITE, 0.2) + self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_WRITE, 0.2, 1) def test_time_run_aborted_write_read(self): o = stm_allocate_old(16) @@ -62,7 +65,7 @@ time.sleep(0.2) stm_write(o) py.test.raises(Conflict, self.commit_transaction) - self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_READ, 0.2) + self.expect_timer(lib.STM_TIME_RUN_ABORTED_WRITE_READ, 0.2, 1) def test_time_run_aborted_inevitable(self): self.start_transaction() @@ -72,23 +75,23 @@ self.start_transaction() 
time.sleep(0.2) py.test.raises(Conflict, self.become_inevitable) - self.expect_timer(lib.STM_TIME_RUN_ABORTED_INEVITABLE, 0.2) + self.expect_timer(lib.STM_TIME_RUN_ABORTED_INEVITABLE, 0.2, 1) def test_time_run_aborted_other(self): self.start_transaction() time.sleep(0.2) self.abort_transaction() - self.expect_timer(lib.STM_TIME_RUN_ABORTED_OTHER, 0.2) + self.expect_timer(lib.STM_TIME_RUN_ABORTED_OTHER, 0.2, 1) def test_time_minor_gc(self): self.start_transaction() - self.expect_timer(lib.STM_TIME_MINOR_GC, 0.0) + self.expect_timer(lib.STM_TIME_MINOR_GC, 0.0, 0) stm_minor_collect() - self.expect_timer(lib.STM_TIME_MINOR_GC, "nonzero") - self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0) + self.expect_timer(lib.STM_TIME_MINOR_GC, "nonzero", 1) + self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0, 0) def test_time_major_gc(self): self.start_transaction() - self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0) + self.expect_timer(lib.STM_TIME_MAJOR_GC, 0.0, 0) stm_major_collect() - self.expect_timer(lib.STM_TIME_MAJOR_GC, "nonzero") + self.expect_timer(lib.STM_TIME_MAJOR_GC, "nonzero", 1) From noreply at buildbot.pypy.org Sun Mar 30 23:25:49 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 23:25:49 +0200 (CEST) Subject: [pypy-commit] pypy default: variable declaration must precede usage in c Message-ID: <20140330212549.347DF1C02AE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70339:c4a26971721e Date: 2014-03-31 00:18 +0300 http://bitbucket.org/pypy/pypy/changeset/c4a26971721e/ Log: variable declaration must precede usage in c diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -238,8 +238,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *filenameObject = PyString_FromString("/path/to/file"); errno = EBADF; - PyObject *filenameObject = PyString_FromString("/path/to/file"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); Py_DECREF(filenameObject); return NULL; @@ -257,8 +257,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *intObject = PyInt_FromLong(3); errno = EBADF; - PyObject *intObject = PyInt_FromLong(3); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); Py_DECREF(intObject); return NULL; @@ -276,8 +276,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); errno = EBADF; - PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); Py_DECREF(lst); return NULL; @@ -295,8 +295,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); errno = EBADF; - PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); Py_DECREF(tuple); return NULL; @@ -314,8 +314,8 @@ module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' + PyObject *none = Py_BuildValue(""); errno = EBADF; - PyObject *none = Py_BuildValue(""); PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); Py_DECREF(none); return NULL; From noreply at buildbot.pypy.org Sun Mar 30 23:47:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 30 Mar 2014 23:47:13 +0200 (CEST) Subject: [pypy-commit] pypy default: variable declaration must precede 
usage in c Message-ID: <20140330214713.4193B1C14E8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70340:e8b425028c7f Date: 2014-03-31 00:45 +0300 http://bitbucket.org/pypy/pypy/changeset/e8b425028c7f/ Log: variable declaration must precede usage in c diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -246,9 +246,9 @@ ("test_FromAny", "METH_NOARGS", ''' npy_intp dims[2] ={2, 3}; - PyObject * obj1 = PyArray_SimpleNew(2, dims, 1); + PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); PyArray_FILLWBYTE(obj1, 42); - PyObject * obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); + obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); Py_DECREF(obj1); return obj2; ''' @@ -256,9 +256,9 @@ ("test_FromObject", "METH_NOARGS", ''' npy_intp dims[2] ={2, 3}; - PyObject * obj1 = PyArray_SimpleNew(2, dims, 1); + PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); PyArray_FILLWBYTE(obj1, 42); - PyObject * obj2 = _PyArray_FromObject(obj1, 12, 0, 0); + obj2 = _PyArray_FromObject(obj1, 12, 0, 0); Py_DECREF(obj1); return obj2; ''' From noreply at buildbot.pypy.org Mon Mar 31 00:14:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 00:14:40 +0200 (CEST) Subject: [pypy-commit] pypy default: fix broken code apparently tested only on win32 Message-ID: <20140330221440.CBA801C0034@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70341:2c0593947580 Date: 2014-03-31 01:06 +0300 http://bitbucket.org/pypy/pypy/changeset/2c0593947580/ Log: fix broken code apparently tested only on win32 diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,7 @@ if restype is None: import ctypes restype = ctypes.c_int + self._argtypes_ = argsl self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return From noreply at buildbot.pypy.org Mon Mar 31 07:32:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 07:32:52 +0200 (CEST) Subject: [pypy-commit] pypy default: remove quotes for windows Message-ID: <20140331053252.15D7B1D2873@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70342:9308b3e48ee7 Date: 2014-03-31 08:06 +0300 http://bitbucket.org/pypy/pypy/changeset/9308b3e48ee7/ Log: remove quotes for windows diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -193,6 +193,7 @@ self.__class__) while True: try: + import pdb;pdb.set_trace() v = self._sslobj.write(data) except SSLError, x: if x.args[0] == SSL_ERROR_WANT_READ: diff --git a/pypy/module/test_lib_pypy/test_site_extra.py b/pypy/module/test_lib_pypy/test_site_extra.py --- a/pypy/module/test_lib_pypy/test_site_extra.py +++ b/pypy/module/test_lib_pypy/test_site_extra.py @@ -4,8 +4,11 @@ def test_preimported_modules(): lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings', 'exceptions', 'signal', 'sys', 'zipimport'] - g = os.popen('"%s" -c "import sys; print sorted(sys.modules)"' % - (sys.executable,)) + if sys.platform == 'win32': + cmd = '%s' % (sys.executable,) + else: + cmd = '"%s"' % (sys.executable,) + g = os.popen(cmd + ' -c "import sys; print sorted(sys.modules)"') real_data = g.read() g.close() for name in lst: From noreply at buildbot.pypy.org Mon Mar 31 07:34:41 2014 From: 
noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 07:34:41 +0200 (CEST) Subject: [pypy-commit] pypy default: whoops, debug cruft Message-ID: <20140331053441.550DC1D2873@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70343:feecd34564ed Date: 2014-03-31 08:33 +0300 http://bitbucket.org/pypy/pypy/changeset/feecd34564ed/ Log: whoops, debug cruft diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -193,7 +193,6 @@ self.__class__) while True: try: - import pdb;pdb.set_trace() v = self._sslobj.write(data) except SSLError, x: if x.args[0] == SSL_ERROR_WANT_READ: From noreply at buildbot.pypy.org Mon Mar 31 08:46:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 08:46:21 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/43f1137bc72e Message-ID: <20140331064621.8E30E1C0034@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70344:055b7a22861c Date: 2014-03-31 08:45 +0200 http://bitbucket.org/pypy/pypy/changeset/055b7a22861c/ Log: import stmgc/43f1137bc72e diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -976a1d42a508 +43f1137bc72e diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -8,6 +8,7 @@ double elapsed) { tl->timing[category] += elapsed; + tl->events[category] += 1; } #define TIMING_CHANGE(tl, newstate) \ @@ -59,7 +60,10 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose) { - TIMING_CHANGE(tl, tl->_timing_cur_state); + enum stm_time_e category = tl->_timing_cur_state; + uint64_t oldevents = tl->events[category]; + TIMING_CHANGE(tl, category); + tl->events[category] = oldevents; assert((sizeof(timer_names) / sizeof(timer_names[0])) == _STM_TIME_N); if (verbose > 0) { @@ -67,8 +71,8 @@ s_mutex_lock(); fprintf(stderr, "thread %p:\n", tl); for (i = 0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %.3f s\n", - timer_names[i], (double)tl->timing[i]); + fprintf(stderr, " %-24s %9u %.3f s\n", + timer_names[i], tl->events[i], (double)tl->timing[i]); } s_mutex_unlock(); } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -87,6 +87,7 @@ (these fields are not modified on a successful commit) */ long last_abort__bytes_in_nursery; /* timing information, accumulated */ + uint32_t events[_STM_TIME_N]; float timing[_STM_TIME_N]; double _timing_cur_start; enum stm_time_e _timing_cur_state; From noreply at buildbot.pypy.org Mon Mar 31 09:33:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 09:33:34 +0200 (CEST) Subject: [pypy-commit] stmgc timelog: intermediate checkin Message-ID: <20140331073334.DD9361D28F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: timelog Changeset: r1124:bd8dd6760887 Date: 2014-03-31 08:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/bd8dd6760887/ Log: intermediate checkin diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -155,6 +155,9 @@ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; + /* Time logging */ + stm_timelog_t *tlog; + /* For debugging 
*/ #ifndef NDEBUG pthread_t running_pthread; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -340,16 +340,16 @@ /* ---------- timelogs ---------- */ enum { - STLOG_REASON_UNKNOWN, - STLOG_REASON_ABORT_SELF, - STLOG_REASON_ABORT_OTHER, - STLOG_REASON_PAUSE, + STM_LOG_REASON_UNKNOWN, + STM_LOG_REASON_ABORT_SELF, + STM_LOG_REASON_ABORT_OTHER, + STM_LOG_REASON_PAUSE, }; enum { - STLOG_CONTENTION_NONE, - STLOG_CONTENTION_WRITE_WRITE, - STLOG_CONTENTION_WRITE_READ, - STLOG_CONTENTION_INEVITABLE, + STM_LOG_CONTENTION_NONE, + STM_LOG_CONTENTION_WRITE_WRITE, + STM_LOG_CONTENTION_WRITE_READ, + STM_LOG_CONTENTION_INEVITABLE, }; typedef struct stm_timelog_s { diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -99,14 +99,14 @@ int stm_can_move(object_t *); void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); -#define STLOG_REASON_UNKNOWN ... -#define STLOG_REASON_ABORT_SELF ... -#define STLOG_REASON_ABORT_OTHER ... -#define STLOG_REASON_PAUSE ... -#define STLOG_CONTENTION_NONE ... -#define STLOG_CONTENTION_WRITE_WRITE ... -#define STLOG_CONTENTION_WRITE_READ ... -#define STLOG_CONTENTION_INEVITABLE ... +#define STM_LOG_REASON_UNKNOWN ... +#define STM_LOG_REASON_ABORT_SELF ... +#define STM_LOG_REASON_ABORT_OTHER ... +#define STM_LOG_REASON_PAUSE ... +#define STM_LOG_CONTENTION_NONE ... +#define STM_LOG_CONTENTION_WRITE_WRITE ... +#define STM_LOG_CONTENTION_WRITE_READ ... +#define STM_LOG_CONTENTION_INEVITABLE ... typedef struct { uint8_t reason; diff --git a/c7/test/test_timelog.py b/c7/test/test_timelog.py --- a/c7/test/test_timelog.py +++ b/c7/test/test_timelog.py @@ -19,8 +19,8 @@ self.start_transaction() tlog = self.fetch_and_remove_timelog() assert tlog != ffi.NULL - assert tlog.reason == lib.STLOG_REASON_UNKNOWN - assert tlog.contention == lib.STLOG_CONTENTION_NONE + assert tlog.reason == lib.STM_LOG_REASON_UNKNOWN + assert tlog.contention == lib.STM_LOG_CONTENTION_NONE assert tlog.user == 0 assert 0.0499 <= tlog.time_lost < 1.0 lib.stm_free_timelog(tlog) From noreply at buildbot.pypy.org Mon Mar 31 09:33:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 09:33:36 +0200 (CEST) Subject: [pypy-commit] stmgc default: Update the text to describe the N+1 segments Message-ID: <20140331073336.078F31D28F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1125:3db91dec36e2 Date: 2014-03-31 09:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/3db91dec36e2/ Log: Update the text to describe the N+1 segments diff --git a/c7/README.txt b/c7/README.txt --- a/c7/README.txt +++ b/c7/README.txt @@ -57,7 +57,7 @@ We have a small, fixed number of big pieces of memory called "segments". Each segment has enough (virtual) address space for all the objects that the program needs. This is actually allocated from a single big mmap() -so that pages can be exchanged between segments with remap_file_pages(). +so that pages can be shared between segments with remap_file_pages(). We call N the number of segments. Actual threads are not limited in number; they grab one segment in order to run GC-manipulating code, and release it afterwards. This is similar to what occurs with the GIL, @@ -81,20 +81,26 @@ --- much like the OS does after a fork() for pages modified by one or the other process. -In more details: the first page of addresses in each thread-local region -(4096 bytes) is made non-accessible, to detect errors of accessing the -NULL pointer. 
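The layout described above relies on remap_file_pages() to make a page of a thread's segment either an alias of the same page in segment 0 ("shared") or a page of its own ("private"), all within one big MAP_SHARED mapping. Below is a standalone sketch of that trick; the segment and page counts are made up for the example, error handling is reduced to asserts, and remap_file_pages() is Linux-specific (deprecated and only emulated by newer kernels).

#define _GNU_SOURCE
#include <sys/mman.h>
#include <string.h>
#include <assert.h>

#define PAGE      4096UL
#define SEG_PAGES 16UL      /* pages per segment, for the example */
#define NSEG      2UL       /* segments 1..NSEG, plus segment 0 */

int main(void)
{
    size_t total = (NSEG + 1) * SEG_PAGES * PAGE;
    char *base = mmap(NULL, total, PROT_READ | PROT_WRITE,
                      MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    assert(base != MAP_FAILED);

    /* "share": page 3 of segment 1 becomes an alias of page 3 of segment 0 */
    char *seg1_page3 = base + (1 * SEG_PAGES + 3) * PAGE;
    assert(remap_file_pages(seg1_page3, PAGE, 0, /*pgoff=*/3, 0) == 0);

    base[3 * PAGE] = 'X';             /* write through segment 0 ...        */
    assert(seg1_page3[0] == 'X');     /* ... and it shows through segment 1 */

    /* "privatize": map the page back onto its own offset, then copy the data */
    assert(remap_file_pages(seg1_page3, PAGE, 0, 1 * SEG_PAGES + 3, 0) == 0);
    memcpy(seg1_page3, base + 3 * PAGE, PAGE);

    seg1_page3[0] = 'Y';              /* private now: segment 0 is unchanged */
    assert(base[3 * PAGE] == 'X');

    munmap(base, total);
    return 0;
}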
The second page is reserved for thread-local data. The -rest is divided into 1/16 for thread-local read markers, followed by -15/16 for the real objects. We initially use remap_file_pages() on this -15/16 range. The read markers are described below. +In more details: we actually get N + 1 consecutive segments, and segment +number 0 is reserved to contain the globally committed state of the +objects. The segments actually used by threads are numbered from 1 to +N. The first page of addresses in each segment is made non-accessible, +to detect errors of accessing the NULL pointer. The second page is +reserved for thread-local data. The rest is divided into 1/16 for +thread-local read markers, followed by 15/16 for the real objects. The +read markers are described below. We use remap_file_pages() on this +15/16 range: every page in this range can be either remapped to the same +page from segment 0 ("shared", the initial state), or remapped back to +itself ("private"). -Each transaction records the objects that it changed. These are -necessarily within unshared pages. When we want to commit a -transaction, we ask for a safe-point (suspending the other threads in a -known state), and then we copy again the modified objects into the other -version(s) of that data. The point is that, from another thread's point -of view, the memory didn't appear to change unexpectedly, but only when -waiting in a safe-point. +Each transaction records the objects that it changed, and makes sure +that the corresponding pages are "private" in this segment. When we +want to commit a transaction, we ask for a safe-point (suspending the +other threads in a known state), and then we copy the modified objects +into the share pages, as well as into the other segments if they are +also backed by private pages. The point is that, from another thread's +point of view, the memory didn't appear to change unexpectedly, but only +when waiting in a safe-point. Moreover, we detect read-write conflicts when trying to commit. To do this, each transaction needs to track in their own (private) read @@ -105,11 +111,13 @@ requiring an abort (which it will do when trying to leave the safe-point). -On the other hand, write-write conflicts are detected eagerly, which is -necessary to avoid that all segments contain a modified version of the -object and no segment is left with the original version. It is done -with a compare-and-swap into an array of write locks (only the first -time a given old object is modified by a given transaction). +On the other hand, write-write conflicts are detected eagerly. It is +done with a compare-and-swap into an array of write locks (only the +first time a given old object is modified by a given transaction). This +used to be necessary in some previous version, but is kept for now +because it would require more measurements to know if it's a good or bad +idea; the alternative is to simply let conflicting writes proceed and +detect the situation at commit time only. Object creation and GC @@ -127,7 +135,7 @@ objects that are also outside the nursery. - pages need to be unshared when they contain old objects that are then - modified. + modified (and only in this case). - we need a write barrier to detect the changes done to any non-nursery object (the first time only). This is just a flag check. Then the @@ -139,13 +147,15 @@ to be synchronized, but ideally the threads should then proceed to do a parallel GC (i.e. 
mark in all threads in parallel, and then sweep in al threads in parallel, with one arbitrary thread - taking on the additional coordination role needed). + taking on the additional coordination role needed). But we'll think + about it when it becomes a problem. - the major collections should be triggered by the amount of really-used - memory, which means: counting the unshared pages as N pages. Major - collection should then re-share the pages as much as possible. This is - the essential part that guarantees that old, no-longer-modified - bunches of objects are eventually present in only one copy in memory, - in shared pages --- while at the same time bounding the number of - calls to remap_file_pages() for each page at N-1 per major collection - cycle. + memory, which means: counting each actual copy of a private page + independently, but shared pages as one. Major collection will then + re-share the pages as much as possible. This is the essential part + that guarantees that old, no-longer-modified bunches of objects are + eventually present in only one copy in memory, in shared pages --- + while at the same time bounding the number of calls to + remap_file_pages() at two for each private page (one to privatize, one + to re-share) for a complete major collection cycle. From noreply at buildbot.pypy.org Mon Mar 31 14:07:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 14:07:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix: 'guard_not_forced_2' was ignored if followed by a 'finish' Message-ID: <20140331120745.42B7A1D29E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70345:8f8d27711915 Date: 2014-03-31 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/8f8d27711915/ Log: Test and fix: 'guard_not_forced_2' was ignored if followed by a 'finish' that doesn't return a reference. 
Found originally with the help of: https://bitbucket.org/samuelgiles/naulang/ diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -362,11 +362,18 @@ self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: if self._finish_gcmap: - self._finish_gcmap[0] |= r_uint(0) # r0 + # we're returning with a guard_not_forced_2, and + # additionally we need to say that r0 contains + # a reference too: + self._finish_gcmap[0] |= r_uint(0) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) + elif self._finish_gcmap: + # we're returning with a guard_not_forced_2 + gcmap = self._finish_gcmap + self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather # keep that one and kill all the others diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -916,3 +916,73 @@ cpu.execute_token(token, 1, a)) assert getmap(frame).count('1') == 4 + def test_finish_without_gcmap(self): + cpu = self.cpu + + loop = self.parse(""" + [i0] + finish(i0, descr=finaldescr) + """, namespace={'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, 10)) + assert not frame.jf_gcmap + + def test_finish_with_trivial_gcmap(self): + cpu = self.cpu + + loop = self.parse(""" + [p0] + finish(p0, descr=finaldescr) + """, namespace={'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, n)) + assert getmap(frame) == '1' + + def test_finish_with_guard_not_forced_2_ref(self): + cpu = self.cpu + + loop = self.parse(""" + [p0, p1] + guard_not_forced_2(descr=faildescr) [p1] + finish(p0, descr=finaldescr) + """, namespace={'faildescr': BasicFailDescr(1), + 'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, n, n)) + assert getmap(frame).count('1') == 2 + + def test_finish_with_guard_not_forced_2_int(self): + cpu = self.cpu + + loop = self.parse(""" + [i0, p1] + guard_not_forced_2(descr=faildescr) [p1] + finish(i0, descr=finaldescr) + """, namespace={'faildescr': BasicFailDescr(1), + 'finaldescr': BasicFinalDescr(2)}) + + token = JitCellToken() + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + cpu.compile_loop(loop.inputargs, loop.operations, token) + n = lltype.nullptr(llmemory.GCREF.TO) + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, + cpu.execute_token(token, 10, n)) + assert getmap(frame).count('1') == 1 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1857,11 +1857,18 @@ arglist = 
op.getarglist() if arglist and arglist[0].type == REF: if self._finish_gcmap: - self._finish_gcmap[0] |= r_uint(1) # rax + # we're returning with a guard_not_forced_2, and + # additionally we need to say that eax/rax contains + # a reference too: + self._finish_gcmap[0] |= r_uint(1) gcmap = self._finish_gcmap else: gcmap = self.gcmap_for_finish self.push_gcmap(self.mc, gcmap, store=True) + elif self._finish_gcmap: + # we're returning with a guard_not_forced_2 + gcmap = self._finish_gcmap + self.push_gcmap(self.mc, gcmap, store=True) else: # note that the 0 here is redundant, but I would rather # keep that one and kill all the others From noreply at buildbot.pypy.org Mon Mar 31 14:26:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 14:26:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix the handling of pypy_stm_nursery_low_fill_mark for what is hopefully all the cases. Message-ID: <20140331122637.B951D1D29E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70346:3616feaae9f8 Date: 2014-03-31 14:25 +0200 http://bitbucket.org/pypy/pypy/changeset/3616feaae9f8/ Log: Fix the handling of pypy_stm_nursery_low_fill_mark for what is hopefully all the cases. diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -807,7 +807,7 @@ def _call_footer(self): gcrootmap = self.cpu.gc_ll_descr.gcrootmap if self.cpu.gc_ll_descr.stm and we_are_translated(): - # call _stm_become_inevitable() if the current jmpbuf is set + # call _pypy_stm_become_inevitable() if the current jmpbuf is set # to this frame, because we're about to leave. This is if # we called a pypy_stm_start_transaction() earlier. 
assert IS_X86_64 @@ -822,9 +822,9 @@ jne_location = mc.get_relative_pos() # # if they are equal, we need to become inevitable now - mc.MOV_ri(edi.value, rstm.adr_jit_default_msg) - mc.CALL(imm(rstm.adr__stm_become_inevitable)) - # there could have been a collection in _stm_become_inevitable; + mc.XOR_rr(edi.value, edi.value) + mc.CALL(imm(rstm.adr__pypy_stm_become_inevitable)) + # there could have been a collection in the call above; # reload the frame into ebp (but we don't need to apply the # write barrier to it now) mc.MOV(ecx, self.heap_shadowstack_top()) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -22,10 +22,8 @@ CFlexSymbolic('((long)&STM_SEGMENT->jmpbuf_ptr)')) adr_write_slowpath = CFlexSymbolic('((long)&_stm_write_slowpath)') -adr_jit_default_msg = ( - CFlexSymbolic('((long)(char *)"return from JITted function")')) -adr__stm_become_inevitable = ( - CFlexSymbolic('((long)&_stm_become_inevitable)')) +adr__pypy_stm_become_inevitable = ( + CFlexSymbolic('((long)&_pypy_stm_become_inevitable)')) adr_stm_commit_transaction = ( CFlexSymbolic('((long)&stm_commit_transaction)')) adr_pypy_stm_start_transaction = ( diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -131,7 +131,7 @@ except AttributeError: pass string_literal = c_string_constant(info) - return 'stm_become_inevitable(&stm_thread_local, %s);' % (string_literal,) + return 'pypy_stm_become_inevitable(%s);' % (string_literal,) def stm_become_globally_unique_transaction(funcgen, op): return ('stm_become_globally_unique_transaction(&stm_thread_local,' diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -14,8 +14,8 @@ inline ssize_t stmcb_size_rounded_up(struct object_s *obj) { ssize_t result = pypy_stmcb_size_rounded_up(obj); - assert(result >= 16); - assert((result & 7) == 0); + OPT_ASSERT(result >= 16); + OPT_ASSERT((result & 7) == 0); return result; } @@ -41,8 +41,8 @@ /* the value '100' means 'use the default'. Other values are interpreted proportionally, up to some maximum. */ long low_fill_mark = (long)(LOW_FILL_MARK * fraction); - if (low_fill_mark > NURSERY_SIZE / 2) - low_fill_mark = NURSERY_SIZE / 2; + if (low_fill_mark > NURSERY_SIZE * 3 / 4) + low_fill_mark = NURSERY_SIZE * 3 / 4; pypy_transaction_length = low_fill_mark; } @@ -141,8 +141,13 @@ while (1) { if (pypy_stm_ready_atomic == 1) { - /* Not in an atomic transaction + /* Not in an atomic transaction; but it might be an inevitable + transaction. 
*/ + assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); + assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + (pypy_stm_nursery_low_fill_mark == 0)); + stm_commit_transaction(); /* After setjmp(), the local variables v_* are preserved because @@ -173,6 +178,8 @@ transaction whose jmpbuf points into this function */ if (pypy_stm_ready_atomic == 1) { + assert(pypy_stm_nursery_low_fill_mark != 0); + assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); stm_commit_transaction(); stm_start_inevitable_transaction(&stm_thread_local); pypy_stm_nursery_low_fill_mark = 0; @@ -180,11 +187,36 @@ else { _stm_become_inevitable("perform_transaction left with atomic"); assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); + pypy_stm_nursery_low_fill_mark_saved = 0; } } + /* double-check */ + if (pypy_stm_ready_atomic == 1) { + assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + (pypy_stm_nursery_low_fill_mark == 0)); + } + else { + assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); + } //gcptr x = stm_pop_root(); /* pop the END_MARKER */ //assert(x == END_MARKER_OFF || x == END_MARKER_ON); STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */ assert(v_old_shadowstack == stm_thread_local.shadowstack); } + +void _pypy_stm_become_inevitable(const char *msg) +{ + if (msg == NULL) { + msg = "return from JITted function"; + } + _stm_become_inevitable(msg); + + if (pypy_stm_ready_atomic == 1) { + pypy_stm_nursery_low_fill_mark = 0; + } + else { + assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); + pypy_stm_nursery_low_fill_mark_saved = 0; + } +} diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -12,20 +12,34 @@ extern __thread long pypy_stm_ready_atomic; extern __thread uintptr_t pypy_stm_nursery_low_fill_mark; extern __thread uintptr_t pypy_stm_nursery_low_fill_mark_saved; +/* Invariant: if we're running a transaction: + - if it is atomic, pypy_stm_nursery_low_fill_mark == (uintptr_t) -1 + - otherwise, if it is inevitable, pypy_stm_nursery_low_fill_mark == 0 + - otherwise, it's a fraction of the nursery size strictly between 0 and 1 +*/ void pypy_stm_setup(void); void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ void pypy_stm_register_thread_local(void); /* generated into stm_prebuilt.c */ void pypy_stm_unregister_thread_local(void); /* generated into stm_prebuilt.c */ +void _pypy_stm_become_inevitable(const char *); + + +static inline void pypy_stm_become_inevitable(const char *msg) +{ + assert(STM_SEGMENT->running_thread == &stm_thread_local); + if (STM_SEGMENT->jmpbuf_ptr != NULL) { + _pypy_stm_become_inevitable(msg); + } +} static inline void pypy_stm_commit_if_not_atomic(void) { int e = errno; if (pypy_stm_ready_atomic == 1) { stm_commit_transaction(); } else { - stm_become_inevitable(&stm_thread_local, - "commit_if_not_atomic in atomic"); + pypy_stm_become_inevitable("commit_if_not_atomic in atomic"); } errno = e; } @@ -40,6 +54,7 @@ static inline void pypy_stm_increment_atomic(void) { switch (++pypy_stm_ready_atomic) { case 2: + assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); pypy_stm_nursery_low_fill_mark_saved = pypy_stm_nursery_low_fill_mark; pypy_stm_nursery_low_fill_mark = (uintptr_t) -1; break; @@ -51,6 +66,9 @@ switch (--pypy_stm_ready_atomic) { case 1: pypy_stm_nursery_low_fill_mark = pypy_stm_nursery_low_fill_mark_saved; + assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); + 
assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + (pypy_stm_nursery_low_fill_mark == 0)); break; case 0: pypy_stm_ready_atomic = 1; From noreply at buildbot.pypy.org Mon Mar 31 14:34:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 14:34:42 +0200 (CEST) Subject: [pypy-commit] benchmarks default: As usual, this hack helps for now -- but not enough in this case Message-ID: <20140331123442.B7AC81D29E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r244:b4c9408aaf04 Date: 2014-03-31 08:42 +0200 http://bitbucket.org/pypy/benchmarks/changeset/b4c9408aaf04/ Log: As usual, this hack helps for now -- but not enough in this case diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -126,11 +126,13 @@ def task(x, h, cameraPos, objs, lightSource): + time.sleep(0) # XXX with atomic: for y in range(h): ray = Ray(cameraPos, (Vector(x/50.0-5,y/50.0-5,0)-cameraPos).normal()) trace(ray, objs, lightSource, 10) + time.sleep(0) # XXX futures = [] def future_dispatcher(ths, *args): From noreply at buildbot.pypy.org Mon Mar 31 14:35:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 14:35:13 +0200 (CEST) Subject: [pypy-commit] pypy default: close file in tests Message-ID: <20140331123513.46DA31D29E8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70347:408d7645568c Date: 2014-03-31 14:09 +0300 http://bitbucket.org/pypy/pypy/changeset/408d7645568c/ Log: close file in tests diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py --- a/lib-python/2.7/test/test_file.py +++ b/lib-python/2.7/test/test_file.py @@ -301,6 +301,7 @@ self.fail("readlines() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) # Reading after iteration hit EOF shouldn't hurt either + f.close() f = self.open(TESTFN, 'rb') try: for line in f: From noreply at buildbot.pypy.org Mon Mar 31 14:35:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 14:35:14 +0200 (CEST) Subject: [pypy-commit] pypy default: a failing test, and the simplest possible fix Message-ID: <20140331123514.729331D29E8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70348:7ccee921d146 Date: 2014-03-31 15:30 +0300 http://bitbucket.org/pypy/pypy/changeset/7ccee921d146/ Log: a failing test, and the simplest possible fix diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -254,6 +254,13 @@ if '__pypy__' in sys.builtin_module_names: assert repr(self.temppath) in g.getvalue() + def test_truncate(self): + f = self.file(self.temppath, "w") + f.write("foo") + f.close() + with self.file(self.temppath, 'r') as f: + raises(IOError, f.truncate, 100) + class AppTestNonblocking(object): def setup_class(cls): diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -193,7 +193,7 @@ # Truncate. Note that this may grow the file! 
handle = get_osfhandle(fd) if not SetEndOfFile(handle): - raise WindowsError(GetLastError(), + raise OSError(GetLastError(), "Could not truncate file") finally: # we restore the file pointer position in any case From noreply at buildbot.pypy.org Mon Mar 31 15:46:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 15:46:05 +0200 (CEST) Subject: [pypy-commit] stmgc default: Count the spinlooping time Message-ID: <20140331134605.BC4351C0161@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1126:a8d0ff724dea Date: 2014-03-31 15:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/a8d0ff724dea/ Log: Count the spinlooping time diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -30,9 +30,14 @@ static void mutex_pages_lock(void) { + if (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) == 0) + return; + + int previous = change_timing_state(STM_TIME_SPIN_LOOP); while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { spin_loop(); } + change_timing_state(previous); } static void mutex_pages_unlock(void) diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -55,6 +55,7 @@ "minor gc", "major gc", "sync pause", + "spin loop", }; void stm_flush_timing(stm_thread_local_t *tl, int verbose) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -70,6 +70,7 @@ STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, STM_TIME_SYNC_PAUSE, + STM_TIME_SPIN_LOOP, _STM_TIME_N }; From noreply at buildbot.pypy.org Mon Mar 31 15:48:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 15:48:28 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/a8d0ff724dea Message-ID: <20140331134828.F05191C14E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70349:4c8e01e55d77 Date: 2014-03-31 15:47 +0200 http://bitbucket.org/pypy/pypy/changeset/4c8e01e55d77/ Log: import stmgc/a8d0ff724dea diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -43f1137bc72e +a8d0ff724dea diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -31,9 +31,14 @@ static void mutex_pages_lock(void) { + if (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) == 0) + return; + + int previous = change_timing_state(STM_TIME_SPIN_LOOP); while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { spin_loop(); } + change_timing_state(previous); } static void mutex_pages_unlock(void) diff --git a/rpython/translator/stm/src_stm/stm/timing.c b/rpython/translator/stm/src_stm/stm/timing.c --- a/rpython/translator/stm/src_stm/stm/timing.c +++ b/rpython/translator/stm/src_stm/stm/timing.c @@ -56,6 +56,7 @@ "minor gc", "major gc", "sync pause", + "spin loop", }; void stm_flush_timing(stm_thread_local_t *tl, int verbose) diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -71,6 +71,7 @@ STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, STM_TIME_SYNC_PAUSE, + STM_TIME_SPIN_LOOP, _STM_TIME_N }; From noreply at buildbot.pypy.org Mon Mar 31 16:24:57 2014 From: noreply at buildbot.pypy.org (groggi) Date: Mon, 31 Mar 2014 16:24:57 +0200 (CEST) Subject: [pypy-commit] pypy 
latest-improve-doc: fixed wrong URL to irc logs Message-ID: <20140331142457.6E1E01D28F6@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70350:ce2c23ba487a Date: 2014-03-30 14:58 +0200 http://bitbucket.org/pypy/pypy/changeset/ce2c23ba487a/ Log: fixed wrong URL to irc logs diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -103,7 +103,7 @@ .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy +.. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html From noreply at buildbot.pypy.org Mon Mar 31 16:24:58 2014 From: noreply at buildbot.pypy.org (groggi) Date: Mon, 31 Mar 2014 16:24:58 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: changed pylib URL Message-ID: <20140331142458.D3A901D28F6@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70351:955089a2cc1e Date: 2014-03-31 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/955089a2cc1e/ Log: changed pylib URL Apperently http://pylib.org has no pylib information since july 2012 (web.archive.org crawls used to check) diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -389,7 +389,7 @@ .. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html -.. _`py library`: http://pylib.org +.. _`py library`: http://pylib.readthedocs.org/ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ From noreply at buildbot.pypy.org Mon Mar 31 16:25:00 2014 From: noreply at buildbot.pypy.org (groggi) Date: Mon, 31 Mar 2014 16:25:00 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: fixed EU-report link in extradoc documentation Message-ID: <20140331142500.161171D28F6@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70352:4b59ff9762a5 Date: 2014-03-31 14:23 +0200 http://bitbucket.org/pypy/pypy/changeset/4b59ff9762a5/ Log: fixed EU-report link in extradoc documentation diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. _`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. 
_`Compiling Dynamic Language Implementations`: https://bytebucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations From noreply at buildbot.pypy.org Mon Mar 31 16:25:01 2014 From: noreply at buildbot.pypy.org (groggi) Date: Mon, 31 Mar 2014 16:25:01 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: fixed old codespeak.net links to point into extradoc repository Message-ID: <20140331142501.4A61A1D28F6@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70353:461d138c2a12 Date: 2014-03-31 14:29 +0200 http://bitbucket.org/pypy/pypy/changeset/461d138c2a12/ Log: fixed old codespeak.net links to point into extradoc repository diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: http://codespeak.net/pypy/extradoc/talk/ep2006/pypy3000.txt -.. _`What can PyPy do for you`: http://codespeak.net/pypy/extradoc/talk/ep2006/usecases-slides.html -.. _`PyPy introduction at EuroPython 2006`: http://codespeak.net/pypy/extradoc/talk/ep2006/intro.pdf -.. _`PyPy - the new Python implementation on the block`: http://codespeak.net/pypy/extradoc/talk/22c3/hpk-tech.html -.. _`PyPy development method`: http://codespeak.net/pypy/extradoc/talk/pycon2006/method_talk.html -.. _`PyPy intro`: http://codespeak.net/pypy/extradoc/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: http://codespeak.net/pypy/extradoc/talk/oscon2003-paper.html -.. _`Architecture introduction slides`: http://codespeak.net/pypy/extradoc/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: http://codespeak.net/pypy/extradoc/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: http://codespeak.net/pypy/extradoc/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: http://codespeak.net/pypy/extradoc/talk/pypy-talk-pycon2005/README.html -.. _`Trouble in Paradise`: http://codespeak.net/pypy/extradoc/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: http://codespeak.net/pypy/extradoc/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf -.. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html +.. _`PyPy 3000`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. _`What can PyPy do for you`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.html +.. _`PyPy introduction at EuroPython 2006`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.html +.. _`PyPy development method`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.html +.. _`PyPy intro`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bytebucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.html +.. _`Architecture introduction slides`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. 
_`py lib slides`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.html +.. _`Trouble in Paradise`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. _`Open Source, EU-Funding and Agile Methods`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/vancouver/talk.html .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html +.. _`PyPy's VM Approach`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/dls2006/talk.html .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ From noreply at buildbot.pypy.org Mon Mar 31 16:25:02 2014 From: noreply at buildbot.pypy.org (groggi) Date: Mon, 31 Mar 2014 16:25:02 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: fixed old codespeak.net links to point into extradoc repository Message-ID: <20140331142502.82CD71D28F6@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70354:11dcb94c33dd Date: 2014-03-31 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/11dcb94c33dd/ Log: fixed old codespeak.net links to point into extradoc repository forgot to check the ones that point to a HTML page, that does not exist inside the repository. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -259,23 +259,23 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html .. _`PyPy 3000`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt -.. _`What can PyPy do for you`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.html +.. _`What can PyPy do for you`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt .. _`PyPy introduction at EuroPython 2006`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf -.. _`PyPy - the new Python implementation on the block`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.html -.. _`PyPy development method`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.html +.. _`PyPy - the new Python implementation on the block`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt .. _`PyPy intro`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: https://bytebucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.html +.. _oscon2003-paper: https://bytebucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt .. 
_`Architecture introduction slides`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf .. _`EU funding for FOSS`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf .. _`py lib slides`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.html +.. _`PyCon 2005`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt .. _`Trouble in Paradise`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf .. _`Sprint Driven Development`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf .. _`Kill -1`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf .. _`Open Source, EU-Funding and Agile Methods`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf -.. _`PyPy Status`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/vancouver/talk.html +.. _`PyPy Status`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/dls2006/talk.html +.. _`PyPy's VM Approach`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ From noreply at buildbot.pypy.org Mon Mar 31 16:25:03 2014 From: noreply at buildbot.pypy.org (groggi) Date: Mon, 31 Mar 2014 16:25:03 +0200 (CEST) Subject: [pypy-commit] pypy latest-improve-doc: fixed URLs bytebucket.org -> bitbucket.org Message-ID: <20140331142503.B0C3B1D28F6@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: latest-improve-doc Changeset: r70355:0c7bd18f76da Date: 2014-03-31 15:09 +0200 http://bitbucket.org/pypy/pypy/changeset/0c7bd18f76da/ Log: fixed URLs bytebucket.org -> bitbucket.org bytebucket.org redirects to bitbucket.org. Interestingly enough bitbucket provided me the bytebucket.org URLs. However, better use the direct ones. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. _`Compiling Dynamic Language Implementations`: https://bytebucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt -.. 
_`What can PyPy do for you`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt -.. _`PyPy introduction at EuroPython 2006`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf -.. _`PyPy - the new Python implementation on the block`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt -.. _`PyPy development method`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt -.. _`PyPy intro`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: https://bytebucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt -.. _`Architecture introduction slides`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt -.. _`Trouble in Paradise`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf -.. _`PyPy Status`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/vancouver/ +.. _`PyPy 3000`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. _`What can PyPy do for you`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt +.. _`PyPy introduction at EuroPython 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt +.. _`PyPy intro`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bitbucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt +.. _`Architecture introduction slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. _`py lib slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt +.. _`Trouble in Paradise`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. _`Open Source, EU-Funding and Agile Methods`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: https://bytebucket.org/pypy/extradoc/raw/tip/talk/dls2006/ +.. _`PyPy's VM Approach`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. 
_`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ From noreply at buildbot.pypy.org Mon Mar 31 16:25:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 16:25:04 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in groggi/pypy/latest-improve-doc (pull request #218) Message-ID: <20140331142504.DFAB31D28F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70356:db053c27ed1b Date: 2014-03-31 16:24 +0200 http://bitbucket.org/pypy/pypy/changeset/db053c27ed1b/ Log: Merged in groggi/pypy/latest-improve-doc (pull request #218) PyPy's docs: fix borken links diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. _`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: http://codespeak.net/pypy/extradoc/talk/ep2006/pypy3000.txt -.. _`What can PyPy do for you`: http://codespeak.net/pypy/extradoc/talk/ep2006/usecases-slides.html -.. _`PyPy introduction at EuroPython 2006`: http://codespeak.net/pypy/extradoc/talk/ep2006/intro.pdf -.. _`PyPy - the new Python implementation on the block`: http://codespeak.net/pypy/extradoc/talk/22c3/hpk-tech.html -.. _`PyPy development method`: http://codespeak.net/pypy/extradoc/talk/pycon2006/method_talk.html -.. _`PyPy intro`: http://codespeak.net/pypy/extradoc/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: http://codespeak.net/pypy/extradoc/talk/oscon2003-paper.html -.. _`Architecture introduction slides`: http://codespeak.net/pypy/extradoc/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: http://codespeak.net/pypy/extradoc/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: http://codespeak.net/pypy/extradoc/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: http://codespeak.net/pypy/extradoc/talk/pypy-talk-pycon2005/README.html -.. _`Trouble in Paradise`: http://codespeak.net/pypy/extradoc/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: http://codespeak.net/pypy/extradoc/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf -.. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html +.. _`PyPy 3000`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. 
_`What can PyPy do for you`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt +.. _`PyPy introduction at EuroPython 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt +.. _`PyPy intro`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bitbucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt +.. _`Architecture introduction slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. _`py lib slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt +.. _`Trouble in Paradise`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. _`Open Source, EU-Funding and Agile Methods`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html +.. _`PyPy's VM Approach`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -389,7 +389,7 @@ .. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html -.. _`py library`: http://pylib.org +.. _`py library`: http://pylib.readthedocs.org/ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -103,7 +103,7 @@ .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy +.. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html From noreply at buildbot.pypy.org Mon Mar 31 19:40:30 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added a debug-folder generated by the extract_loops.py script, which keeps the debug_* operations. 
Message-ID: <20140331174030.DD1F11C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r723:7c6f695323c6 Date: 2014-03-29 19:10 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/7c6f695323c6/ Log: Added a debug-folder generated by the extract_loops.py script, which keeps the debug_* operations. diff --git a/spyvm/tool/extract_loops.py b/spyvm/tool/extract_loops.py --- a/spyvm/tool/extract_loops.py +++ b/spyvm/tool/extract_loops.py @@ -6,21 +6,26 @@ print "Need pypy log-file as parameter." return 1 logfile = argv[0] - traces = logparser.extract_traces(logfile, remove_main_labels=False) tracedir = logfile + "_traces" + traces = logparser.extract_traces(logfile, remove_main_labels=False) + print_traces(traces, tracedir) + traces = logparser.extract_traces(logfile, remove_debug=False, remove_main_labels=False) + print_traces(traces, os.path.join(tracedir, "debug")) + +def print_traces(traces, tracedir): if os.path.exists(tracedir): shutil.rmtree(tracedir) os.mkdir(tracedir) for i, trace in enumerate(traces): basename = os.path.join(tracedir, "loop_" + str(i)) - print_trace(trace.loop, basename + '_main') - print_trace(trace.setup, basename + '_setup') + print_trace_part(trace.loop, basename + '_main') + print_trace_part(trace.setup, basename + '_setup') for bridge_num, bridge in enumerate(trace.bridges): - print_trace(bridge, basename + "_bridge_" + str(bridge_num)) + print_trace_part(bridge, basename + "_bridge_" + str(bridge_num)) -def print_trace(trace, filename): +def print_trace_part(trace, filename): if trace: file = open(filename, 'w') for t in trace: From noreply at buildbot.pypy.org Mon Mar 31 19:40:36 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:36 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed RPython compile error regarding constructors. Reduced code duplication. Message-ID: <20140331174036.D0FD51C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r728:6558ff7c5237 Date: 2014-03-31 15:31 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6558ff7c5237/ Log: Fixed RPython compile error regarding constructors. Reduced code duplication. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -559,7 +559,8 @@ def initialize_storage(self, space, size, weak=False): from spyvm.shadow import empty_storage - self.store_shadow(empty_storage(space, size, weak)(space, self, size)) + storage = empty_storage(space, self, size, weak) + self.store_shadow(storage) self.log_storage("Initialized") def fillin(self, space, g_self): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -56,6 +56,8 @@ class AbstractStorageShadow(AbstractShadow): _attrs_ = [] repr_classname = "AbstractStorageShadow" + def __init__(self, space, w_self, size): + AbstractShadow.__init__(self, space, w_self) def store(self, n0, w_val): if self.can_contain(w_val): return self.do_store(n0, w_val) @@ -73,7 +75,7 @@ _attrs_ = ['_size'] _immutable_fields_ = ['_size'] def __init__(self, space, w_self, size): - AbstractShadow.__init__(self, space, w_self) + AbstractStorageShadow.__init__(self, space, w_self, size) self._size = size def fetch(self, n0): if n0 >= self._size: @@ -95,7 +97,7 @@ _immutable_fields_ = ['storage'] def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self) + AbstractStorageShadow.__init__(self, space, w_self, size) self.storage = [self.nil_value] * size def size(self): @@ -162,14 +164,14 @@ def unwrap(space, w_val): return space.unwrap_float(w_val) -def empty_storage(space, size, weak=False): +def empty_storage(space, w_self, size, weak=False): if weak: - return WeakListStorageShadow + return WeakListStorageShadow(space, w_self, size) else: if no_specialized_storage: - return ListStorageShadow + return ListStorageShadow(space, w_self, size) else: - return AllNilStorageShadow + return AllNilStorageShadow(space, w_self, size) def find_storage_for_objects(space, vars): if no_specialized_storage: @@ -201,15 +203,23 @@ # If this happens, please look for a bug in the code above. assert False, "No strategy could be found for list..." 
- -class ListStorageShadow(AbstractShadow): + +class ListStorageMixin(object): + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self, size) + self.initialize_storage(size) + def size(self): + return len(self.storage) + def copy_from(self, other_shadow): + if self.size() != other_shadow.size(): + self.initialize_storage(other_shadow.size()) + AbstractShadow.copy_from(self, other_shadow) + +class ListStorageShadow(AbstractStorageShadow): _attrs_ = ['storage'] _immutable_fields_ = ['storage'] repr_classname = "ListStorageShadow" - - def __init__(self, space, w_self, size): - AbstractShadow.__init__(self, space, w_self) - self.initialize_storage(size) + import_from_mixin(ListStorageMixin) def initialize_storage(self, size): self.storage = [self.space.w_nil] * size @@ -217,30 +227,21 @@ return self.storage[n0] def store(self, n0, w_value): self.storage[n0] = w_value - def size(self): - return len(self.storage) - def copy_from(self, other_shadow): - if self.size() != other_shadow.size(): - self.initialize_storage(other_shadow.size()) - AbstractShadow.copy_from(self, other_shadow) -class WeakListStorageShadow(AbstractShadow): +class WeakListStorageShadow(AbstractStorageShadow): _attrs_ = ['storage'] _immutable_fields_ = ['storage'] repr_classname = "WeakListStorageShadow" + import_from_mixin(ListStorageMixin) - def __init__(self, space, w_self, size): - AbstractShadow.__init__(self, space, w_self) - self.storage = [weakref.ref(space.w_nil)] * size - + def initialize_storage(self, size): + self.storage = [weakref.ref(self.space.w_nil)] * size def fetch(self, n0): weakobj = self.storage[n0] return weakobj() or self.space.w_nil def store(self, n0, w_value): assert w_value is not None self.storage[n0] = weakref.ref(w_value) - def size(self): - return len(self.storage) class AbstractCachingShadow(ListStorageShadow): _immutable_fields_ = ['version?'] From noreply at buildbot.pypy.org Mon Mar 31 19:40:32 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged W_CompiledMethod and CompiledMethodShadow. Message-ID: <20140331174032.38E451C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r724:77db240b599b Date: 2014-03-31 13:17 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/77db240b599b/ Log: Merged W_CompiledMethod and CompiledMethodShadow. There was no point in this separation: The two cannot exist separately, and CompiledMethodShadow was not at all integrated with the actual shadow architecture. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -17,8 +17,8 @@ """Illegal Store.""" def get_printable_location(pc, self, method): - bc = ord(method.bytecode[pc]) - name = method._w_self._likely_methodname + bc = ord(method.bytes[pc]) + name = method._likely_methodname return '%d: [%s]%s (%s)' % (pc, hex(bc), BYTECODE_NAMES[bc], name) @@ -90,7 +90,7 @@ s_new_context = s_sender while s_new_context is not nlr.s_target_context: s_sender = s_new_context.s_sender() - if not s_new_context.is_closure_context() and s_new_context.s_method().primitive() == 198: + if not s_new_context.is_closure_context() and s_new_context.w_method().primitive() == 198: s_new_context.activate_unwind_context(self) s_new_context.mark_returned() s_new_context = s_sender @@ -104,7 +104,7 @@ old_pc = 0 if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) - method = s_context.s_method() + method = s_context.w_method() while True: pc = s_context.pc() if pc < old_pc: @@ -122,7 +122,7 @@ self.step(s_context) except Return, nlr: if nlr.s_target_context is not s_context: - if not s_context.is_closure_context() and s_context.s_method().primitive() == 198: + if not s_context.is_closure_context() and s_context.w_method().primitive() == 198: s_context.activate_unwind_context(self) s_context.mark_returned() raise nlr @@ -165,8 +165,7 @@ w_method.literalatput0(self.space, 1, w_selector) assert len(arguments_w) <= 7 w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethod - s_method = w_method.as_compiledmethod_get_shadow(self.space) - s_frame = MethodContextShadow(self.space, None, s_method, w_receiver, []) + s_frame = MethodContextShadow(self.space, None, w_method, w_receiver, []) s_frame.push(w_receiver) s_frame.push_all(list(arguments_w)) @@ -285,14 +284,14 @@ def pushLiteralConstantBytecode(self, interp, current_bytecode): index = current_bytecode & 31 - self.push(self.s_method().getliteral(index)) + self.push(self.w_method().getliteral(index)) def pushLiteralVariableBytecode(self, interp, current_bytecode): # this bytecode assumes that literals[index] is an Association # which is an object with two named vars, and fetches the second # named var (the value). 
index = current_bytecode & 31 - w_association = self.s_method().getliteral(index) + w_association = self.w_method().getliteral(index) association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) @@ -337,7 +336,7 @@ # send, return bytecodes def sendLiteralSelectorBytecode(self, interp, current_bytecode): - w_selector = self.s_method().getliteral(current_bytecode & 15) + w_selector = self.w_method().getliteral(current_bytecode & 15) argcount = ((current_bytecode >> 4) & 3) - 1 return self._sendSelfSelector(w_selector, argcount, interp) @@ -347,7 +346,7 @@ receiver, receiver.class_shadow(self.space)) def _sendSuperSelector(self, w_selector, argcount, interp): - w_compiledin = self.s_method().w_compiledin + w_compiledin = self.w_method().compiled_in() assert isinstance(w_compiledin, model.W_PointersObject) s_compiledin = w_compiledin.as_class_get_shadow(self.space) return self._sendSelector(w_selector, argcount, interp, self.w_receiver(), @@ -362,18 +361,18 @@ assert argcount >= 0 try: - s_method = receiverclassshadow.lookup(w_selector) + w_method = receiverclassshadow.lookup(w_selector) except MethodNotFound: return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - code = s_method.primitive() + code = w_method.primitive() if code: try: - return self._call_primitive(code, interp, argcount, s_method, w_selector) + return self._call_primitive(code, interp, argcount, w_method, w_selector) except primitives.PrimitiveFailedError: pass # ignore this error and fall back to the Smalltalk version arguments = self.pop_and_return_n(argcount) - s_frame = s_method.create_frame(receiver, arguments, self) + s_frame = w_method.create_frame(interp.space, receiver, arguments, self) self.pop() # receiver # ###################################################################### @@ -392,13 +391,13 @@ w_message.store(self.space, 1, self.space.wrap_list(arguments)) s_class = receiver.class_shadow(self.space) try: - s_method = s_class.lookup(self.space.objtable["w_doesNotUnderstand"]) + w_method = s_class.lookup(self.space.objtable["w_doesNotUnderstand"]) except MethodNotFound: from spyvm.shadow import ClassShadow assert isinstance(s_class, ClassShadow) print "Missing doesDoesNotUnderstand in hierarchy of %s" % s_class.getname() raise - s_frame = s_method.create_frame(receiver, [w_message], self) + s_frame = w_method.create_frame(interp.space, receiver, [w_message], self) self.pop() # ###################################################################### @@ -409,7 +408,7 @@ return interp.stack_frame(s_frame) - def _call_primitive(self, code, interp, argcount, s_method, w_selector): + def _call_primitive(self, code, interp, argcount, w_method, w_selector): # the primitive pushes the result (if any) onto the stack itself if interp.should_trace(): print "%sActually calling primitive %d" % (interp._last_indent, code,) @@ -421,14 +420,14 @@ code, self.w_method()._likely_methodname, w_selector.as_repr_string()) try: # note: argcount does not include rcvr - return func(interp, self, argcount, s_method) + return func(interp, self, argcount, w_method) except primitives.PrimitiveFailedError, e: if interp.trace: print "%s primitive FAILED" % ( ' ' * (interp.max_stack_depth - interp.remaining_stack_depth),) if interp.should_trace(True): - print "PRIMITIVE FAILED: %d %s" % (s_method.primitive, w_selector.as_repr_string()) + print "PRIMITIVE FAILED: %d %s" % (w_method.primitive, w_selector.as_repr_string()) raise e @@ -490,9 +489,9 @@ elif variableType == 1: 
self.push(self.gettemp(variableIndex)) elif variableType == 2: - self.push(self.s_method().getliteral(variableIndex)) + self.push(self.w_method().getliteral(variableIndex)) elif variableType == 3: - w_association = self.s_method().getliteral(variableIndex) + w_association = self.w_method().getliteral(variableIndex) association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) else: @@ -507,7 +506,7 @@ elif variableType == 2: raise IllegalStoreError elif variableType == 3: - w_association = self.s_method().getliteral(variableIndex) + w_association = self.w_method().getliteral(variableIndex) association = wrapper.AssociationWrapper(self.space, w_association) association.store_value(self.top()) @@ -517,7 +516,7 @@ def getExtendedSelectorArgcount(self): descriptor = self.getbytecode() - return ((self.s_method().getliteral(descriptor & 31)), + return ((self.w_method().getliteral(descriptor & 31)), (descriptor >> 5)) def singleExtendedSendBytecode(self, interp, current_bytecode): @@ -531,21 +530,21 @@ opType = second >> 5 if opType == 0: # selfsend - return self._sendSelfSelector(self.s_method().getliteral(third), + return self._sendSelfSelector(self.w_method().getliteral(third), second & 31, interp) elif opType == 1: # supersend - return self._sendSuperSelector(self.s_method().getliteral(third), + return self._sendSuperSelector(self.w_method().getliteral(third), second & 31, interp) elif opType == 2: # pushReceiver self.push(self.w_receiver().fetch(self.space, third)) elif opType == 3: # pushLiteralConstant - self.push(self.s_method().getliteral(third)) + self.push(self.w_method().getliteral(third)) elif opType == 4: # pushLiteralVariable - w_association = self.s_method().getliteral(third) + w_association = self.w_method().getliteral(third) association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) elif opType == 5: @@ -559,7 +558,7 @@ except error.SenderChainManipulation, e: raise StackOverflow(self) elif opType == 7: - w_association = self.s_method().getliteral(third) + w_association = self.w_method().getliteral(third) association = wrapper.AssociationWrapper(self.space, w_association) association.store_value(self.top()) @@ -569,7 +568,7 @@ def secondExtendedSendBytecode(self, interp, current_bytecode): descriptor = self.getbytecode() - w_selector = self.s_method().getliteral(descriptor & 63) + w_selector = self.w_method().getliteral(descriptor & 63) argcount = descriptor >> 6 return self._sendSelfSelector(w_selector, argcount, interp) @@ -925,16 +924,16 @@ ContextPartShadow._sendSelector = stepping_debugger_send(ContextPartShadow._sendSelector) def stepping_debugger_failed_primitive_halt(original): - def meth(self, code, interp, argcount, s_method, w_selector): + def meth(self, code, interp, argcount, w_method, w_selector): try: - original(self, code, interp, argcount, s_method, w_selector) + original(self, code, interp, argcount, w_method, w_selector) except primitives.PrimitiveFailedError, e: if interp.halt_on_failing_primitives: func = primitives.prim_holder.prim_table[code] if func.func_name != 'raise_failing_default' and code != 83: import pdb; pdb.set_trace() try: - func(interp, self, argcount, s_method) # will fail again + func(interp, self, argcount, w_method) # will fail again except primitives.PrimitiveFailedError: pass raise e @@ -943,12 +942,12 @@ ContextPartShadow._call_primitive = stepping_debugger_failed_primitive_halt(ContextPartShadow._call_primitive) def trace_missing_named_primitives(original): - 
def meth(interp, s_frame, argcount, s_method=None): + def meth(interp, s_frame, argcount, w_method=None): try: - return original(interp, s_frame, argcount, s_method=s_method) + return original(interp, s_frame, argcount, w_method=w_method) except primitives.PrimitiveFailedError, e: space = interp.space - w_description = s_method.w_self().literalat0(space, 1) + w_description = w_method.literalat0(space, 1) if not isinstance(w_description, model.W_PointersObject) or w_description.size() < 2: raise e w_modulename = w_description.at0(space, 0) diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -268,12 +268,12 @@ @expose_on_virtual_machine_proxy([], int) def methodPrimitiveIndex(): - return IProxy.s_method.primitive() + return IProxy.w_method.primitive() @expose_on_virtual_machine_proxy([oop], int) def primitiveIndexOf(w_method): if isinstance(w_method, model.W_CompiledMethod): - return w_method.as_compiledmethod_get_shadow(None).primitive() + return w_method.primitive() else: raise ProxyFunctionFailed @@ -642,7 +642,7 @@ @expose_on_virtual_machine_proxy([], oop, minor=2) def primitiveMethod(): - return IProxy.s_method.w_self() + return IProxy.w_method # /* InterpreterProxy methodsFor: 'FFI support' */ @@ -1006,12 +1006,12 @@ self.interp = None self.s_frame = None self.argcount = 0 - self.s_method = None + self.w_method = None self.fail_reason = 0 self.trace_proxy = False - def call(self, signature, interp, s_frame, argcount, s_method): - self.initialize_from_call(signature, interp, s_frame, argcount, s_method) + def call(self, signature, interp, s_frame, argcount, w_method): + self.initialize_from_call(signature, interp, s_frame, argcount, w_method) try: # eventual errors are caught by the calling function (EXTERNAL_CALL) external_function = rffi.cast(func_bool_void, @@ -1044,11 +1044,11 @@ return _external_function - def initialize_from_call(self, signature, interp, s_frame, argcount, s_method): + def initialize_from_call(self, signature, interp, s_frame, argcount, w_method): self.interp = interp self.s_frame = s_frame self.argcount = argcount - self.s_method = s_method + self.w_method = w_method self.space = interp.space self.trace_proxy = interp.trace_proxy # ensure that space.w_nil gets the first possible oop diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -16,7 +16,7 @@ """ import sys, weakref from spyvm import constants, error, version, storage_statistics -from spyvm.version import elidable_for_version +from spyvm.version import elidable_for_version, constant_for_version from rpython.rlib import rrandom, objectmodel, jit, signature from rpython.rlib.rarithmetic import intmask, r_uint, r_int @@ -1126,9 +1126,14 @@ repr_classname = "W_CompiledMethod" bytes_per_slot = 1 - _immutable_fields_ = ["_shadow?"] - _attrs_ = ["bytes", "_likely_methodname", "header", "argsize", "primitive", - "literals", "tempsize", "literalsize", "islarge", "_shadow"] + _attrs_ = [ "version", + # Method header + "header", "_primitive", "literalsize", "islarge", "_tempsize", "argsize", + # Main method content + "bytes", "literals", + # Additional info about the method + "_likely_methodname", "w_compiledin" ] + ### Extension from Squeak 3.9 doc, which we do not implement: ### trailer (variable) ### The trailer has two variant formats. In the first variant, the last @@ -1139,11 +1144,10 @@ ### variables. 
The number of bytes used for this purpose is the value of ### the last byte in the method. - _shadow = None # Default value _likely_methodname = "" - + import_from_mixin(version.VersionMixin) + def __init__(self, space, bytecount=0, header=0): - self._shadow = None self.setheader(space, header) self.bytes = ["\x00"] * bytecount @@ -1151,21 +1155,165 @@ # Implicitely sets the header, including self.literalsize for i, w_object in enumerate(g_self.get_pointers()): self.literalatput0(space, i, w_object) - self.setbytes(g_self.get_bytes()[(self.literalsize + 1) * 4:]) + self.setbytes(g_self.get_bytes()[self.bytecodeoffset():]) + # === Setters === + + def setheader(self, space, header): + _primitive, literalsize, islarge, tempsize, argsize = constants.decode_compiled_method_header(header) + self.literalsize = literalsize + self.literals = [space.w_nil] * self.literalsize + self.header = header + self.argsize = argsize + self._tempsize = tempsize + self._primitive = _primitive + self.islarge = islarge + self.w_compiledin = None + self.changed() + + def setliteral(self, index, w_lit): + self.literals[index] = w_lit + self.changed() + if index == len(self.literals): + self.w_compiledin = None + + def setliterals(self, literals): + """NOT RPYTHON""" # Only for testing, not safe. + self.literals = literals + self.changed() + self.w_compiledin = None + + def setbytes(self, bytes): + self.bytes = bytes + self.changed() + + def setchar(self, index0, character): + assert index0 >= 0 + self.bytes[index0] = character + self.changed() + + # === Getters === + + def getclass(self, space): + return space.w_CompiledMethod + + @constant_for_version + def size(self): + return self.headersize() + self.getliteralsize() + len(self.bytes) + + @constant_for_version + def tempsize(self): + return self._tempsize + + @constant_for_version + def getliteralsize(self): + return self.literalsize * constants.BYTES_PER_WORD + + @constant_for_version + def bytecodeoffset(self): + return self.getliteralsize() + self.headersize() + + def headersize(self): + return constants.BYTES_PER_WORD + + @constant_for_version + def getheader(self): + return self.header + + @constant_for_version + def getliteral(self, index): + return self.literals[index] + + @constant_for_version + def primitive(self): + return self._primitive + + @constant_for_version + def compute_frame_size(self): + # From blue book: normal mc have place for 12 temps+maxstack + # mc for methods with islarge flag turned on 32 + return 16 + self.islarge * 40 + self.argsize + + @constant_for_version + def getbytecode(self, pc): + assert pc >= 0 and pc < len(self.bytes) + return self.bytes[pc] + + @constant_for_version + def compiled_in(self): + w_compiledin = self.w_compiledin + if not w_compiledin: + if self.literals: + # (Blue book, p 607) All CompiledMethods that contain + # extended-super bytecodes have the clain which they are found as + # their last literal variable. 
+ # Last of the literals is an association with compiledin as a class + w_association = self.literals[-1] + if isinstance(w_association, W_PointersObject) and w_association.size() >= 2: + from spyvm import wrapper + association = wrapper.AssociationWrapper(w_association.space(), w_association) + w_compiledin = association.value() + self.w_compiledin = w_compiledin + return w_compiledin + + # === Object Access === + + def literalat0(self, space, index0): + if index0 == 0: + return space.wrap_int(self.getheader()) + else: + return self.getliteral(index0 - 1) + + def literalatput0(self, space, index0, w_value): + if index0 == 0: + header = space.unwrap_int(w_value) + self.setheader(space, header) + else: + self.setliteral(index0 - 1, w_value) + + def store(self, space, index0, w_v): + self.atput0(space, index0, w_v) + + def at0(self, space, index0): + if index0 < self.bytecodeoffset(): + # XXX: find out what happens if unaligned + return self.literalat0(space, index0 / constants.BYTES_PER_WORD) + else: + # From blue book: + # The literal count indicates the size of the + # CompiledMethod's literal frame. + # This, in turn, indicates where the + # CompiledMethod's bytecodes start. + index0 = index0 - self.bytecodeoffset() + assert index0 < len(self.bytes) + return space.wrap_int(ord(self.bytes[index0])) + + def atput0(self, space, index0, w_value): + if index0 < self.bytecodeoffset(): + if index0 % constants.BYTES_PER_WORD != 0: + raise error.PrimitiveFailedError("improper store") + self.literalatput0(space, index0 / constants.BYTES_PER_WORD, w_value) + else: + index0 = index0 - self.bytecodeoffset() + assert index0 < len(self.bytes) + self.setchar(index0, chr(space.unwrap_int(w_value))) + + # === Misc === + def become(self, w_other): if not isinstance(w_other, W_CompiledMethod): return False self.argsize, w_other.argsize = w_other.argsize, self.argsize - self.primitive, w_other.primitive = w_other.primitive, self.primitive + self._primitive, w_other._primitive = w_other._primitive, self._primitive self.literals, w_other.literals = w_other.literals, self.literals - self.tempsize, w_other.tempsize = w_other.tempsize, self.tempsize + self._tempsize, w_other._tempsize = w_other._tempsize, self._tempsize self.bytes, w_other.bytes = w_other.bytes, self.bytes self.header, w_other.header = w_other.header, self.header self.literalsize, w_other.literalsize = w_other.literalsize, self.literalsize self.islarge, w_other.islarge = w_other.islarge, self.islarge - self._shadow, w_other._shadow = w_other._shadow, self._shadow W_AbstractObjectWithIdentityHash._become(self, w_other) + self.changed() + w_other.changed() return True def clone(self, space): @@ -1174,8 +1322,28 @@ copy.literals = list(self.literals) return copy - def getclass(self, space): - return space.w_CompiledMethod + def invariant(self): + return (W_Object.invariant(self) and + hasattr(self, 'literals') and + self.literals is not None and + hasattr(self, 'bytes') and + self.bytes is not None and + hasattr(self, 'argsize') and + self.argsize is not None and + hasattr(self, '_tempsize') and + self._tempsize is not None and + hasattr(self, '_primitive') and + self._primitive is not None) + + def is_array_object(self): + return True + + def create_frame(self, space, receiver, arguments, sender = None): + from spyvm.shadow import MethodContextShadow + assert len(arguments) == self.argsize + return MethodContextShadow(space, None, self, receiver, arguments, sender) + + # === Printing === def guess_classname (self): return "CompiledMethod" @@ 
-1222,118 +1390,6 @@ def get_identifier_string(self): return "%s >> #%s" % (self.guess_containing_classname(), self._likely_methodname) - def invariant(self): - return (W_Object.invariant(self) and - hasattr(self, 'literals') and - self.literals is not None and - hasattr(self, 'bytes') and - self.bytes is not None and - hasattr(self, 'argsize') and - self.argsize is not None and - hasattr(self, 'tempsize') and - self.tempsize is not None and - hasattr(self, 'primitive') and - self.primitive is not None) - - def size(self): - return self.headersize() + self.getliteralsize() + len(self.bytes) - - def gettempsize(self): - return self.tempsize - - def getliteralsize(self): - return self.literalsize * constants.BYTES_PER_WORD - - def bytecodeoffset(self): - return self.getliteralsize() + self.headersize() - - def headersize(self): - return constants.BYTES_PER_WORD - - def getheader(self): - return self.header - - def setheader(self, space, header): - primitive, literalsize, islarge, tempsize, argsize = constants.decode_compiled_method_header(header) - self.literalsize = literalsize - self.literals = [space.w_nil] * self.literalsize - self.header = header - self.argsize = argsize - self.tempsize = tempsize - self.primitive = primitive - self.islarge = islarge - - def setliterals(self, literals): - """NOT RPYTHON - Only for testing""" - self.literals = literals - if self.has_shadow(): - self._shadow.update() - - def setbytes(self, bytes): - self.bytes = bytes - - def as_compiledmethod_get_shadow(self, space): - from shadow import CompiledMethodShadow - if self._shadow is None: - self._shadow = CompiledMethodShadow(self, space) - return self._shadow - - def literalat0(self, space, index0): - if index0 == 0: - return space.wrap_int(self.getheader()) - else: - return self.literals[index0-1] - - def literalatput0(self, space, index0, w_value): - if index0 == 0: - header = space.unwrap_int(w_value) - self.setheader(space, header) - else: - self.literals[index0-1] = w_value - if self.has_shadow(): - self._shadow.update() - - def store(self, space, index0, w_v): - self.atput0(space, index0, w_v) - - def at0(self, space, index0): - if index0 < self.bytecodeoffset(): - # XXX: find out what happens if unaligned - return self.literalat0(space, index0 / constants.BYTES_PER_WORD) - else: - # From blue book: - # The literal count indicates the size of the - # CompiledMethod's literal frame. - # This, in turn, indicates where the - # CompiledMethod's bytecodes start. 
- index0 = index0 - self.bytecodeoffset() - assert index0 < len(self.bytes) - return space.wrap_int(ord(self.bytes[index0])) - - def atput0(self, space, index0, w_value): - if index0 < self.bytecodeoffset(): - if index0 % constants.BYTES_PER_WORD != 0: - raise error.PrimitiveFailedError("improper store") - self.literalatput0(space, index0 / constants.BYTES_PER_WORD, w_value) - else: - # XXX use to-be-written unwrap_char - index0 = index0 - self.bytecodeoffset() - assert index0 < len(self.bytes) - self.setchar(index0, chr(space.unwrap_int(w_value))) - - def setchar(self, index0, character): - assert index0 >= 0 - self.bytes[index0] = character - if self.has_shadow(): - self._shadow.update() - - def has_shadow(self): - return self._shadow is not None - - def is_array_object(self): - return True - class DetachingShadowError(Exception): def __init__(self, old_shadow, new_shadow_class): self.old_shadow = old_shadow diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -224,7 +224,7 @@ def newClosure(self, w_outer_ctxt, pc, numArgs, copiedValues): assert isinstance(w_outer_ctxt, model.W_PointersObject) - pc_with_bytecodeoffset = pc + w_outer_ctxt.as_context_get_shadow(self).s_method().bytecodeoffset + 1 + pc_with_bytecodeoffset = pc + w_outer_ctxt.as_context_get_shadow(self).w_method().bytecodeoffset() + 1 BlockClosureShadow = self.w_BlockClosure.as_class_get_shadow(self) numCopied = len(copiedValues) w_closure = BlockClosureShadow.new(numCopied) diff --git a/spyvm/plugins/plugin.py b/spyvm/plugins/plugin.py --- a/spyvm/plugins/plugin.py +++ b/spyvm/plugins/plugin.py @@ -9,12 +9,12 @@ self.prims = {} self.userdata = {} - def call(self, name, interp, s_frame, argcount, s_method): + def call(self, name, interp, s_frame, argcount, w_method): func = self._find_prim(name) if not func: raise error.PrimitiveFailedError("Not implemented: %s" % name) else: - return func(interp, s_frame, argcount, s_method) + return func(interp, s_frame, argcount, w_method) @jit.elidable def _find_prim(self, name): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -32,7 +32,7 @@ # arguments, an interp and an argument_count # completes, and returns a result, or throws a PrimitiveFailedError. 
def make_failing(code): - def raise_failing_default(interp, s_frame, argument_count, s_method=None): + def raise_failing_default(interp, s_frame, argument_count, w_method=None): raise PrimitiveFailedError return raise_failing_default @@ -98,9 +98,9 @@ def decorator(func): if unwrap_spec is None: - def wrapped(interp, s_frame, argument_count_m1, s_method=None): + def wrapped(interp, s_frame, argument_count_m1, w_method=None): if compiled_method: - w_result = func(interp, s_frame, argument_count_m1, s_method) + w_result = func(interp, s_frame, argument_count_m1, w_method) else: w_result = func(interp, s_frame, argument_count_m1) if result_is_new_frame: @@ -113,7 +113,7 @@ assert (len_unwrap_spec == len(inspect.getargspec(func)[0]) + 1, "wrong number of arguments") unrolling_unwrap_spec = unrolling_iterable(enumerate(unwrap_spec)) - def wrapped(interp, s_frame, argument_count_m1, s_method=None): + def wrapped(interp, s_frame, argument_count_m1, w_method=None): argument_count = argument_count_m1 + 1 # to account for the rcvr assert argument_count == len_unwrap_spec if s_frame.stackdepth() < len_unwrap_spec: @@ -645,7 +645,7 @@ return w_rcvr @expose_primitive(BITBLT_COPY_BITS, clean_stack=False, no_result=False, compiled_method=True) -def func(interp, s_frame, argcount, s_method): +def func(interp, s_frame, argcount, w_method): from spyvm.interpreter import Return w_rcvr = s_frame.peek(0) try: @@ -660,7 +660,7 @@ return w_rcvr except shadow.MethodNotFound: from spyvm.plugins.bitblt import BitBltPlugin - BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, w_method) return w_rcvr @expose_primitive(BE_CURSOR) @@ -874,9 +874,9 @@ w_rcvr.w_class = w_arg_class @expose_primitive(EXTERNAL_CALL, clean_stack=False, no_result=True, compiled_method=True) -def func(interp, s_frame, argcount, s_method): +def func(interp, s_frame, argcount, w_method): space = interp.space - w_description = s_method.w_self().literalat0(space, 1) + w_description = w_method.literalat0(space, 1) if not isinstance(w_description, model.W_PointersObject) or w_description.size() < 2: raise PrimitiveFailedError w_modulename = w_description.at0(space, 0) @@ -888,27 +888,26 @@ if signature[0] == 'BitBltPlugin': from spyvm.plugins.bitblt import BitBltPlugin - return BitBltPlugin.call(signature[1], interp, s_frame, argcount, s_method) + return BitBltPlugin.call(signature[1], interp, s_frame, argcount, w_method) elif signature[0] == "SocketPlugin": from spyvm.plugins.socket import SocketPlugin - return SocketPlugin.call(signature[1], interp, s_frame, argcount, s_method) + return SocketPlugin.call(signature[1], interp, s_frame, argcount, w_method) elif signature[0] == "FilePlugin": from spyvm.plugins.fileplugin import FilePlugin - return FilePlugin.call(signature[1], interp, s_frame, argcount, s_method) + return FilePlugin.call(signature[1], interp, s_frame, argcount, w_method) elif signature[0] == "VMDebugging": from spyvm.plugins.vmdebugging import DebuggingPlugin - return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) + return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, w_method) else: from spyvm.interpreter_proxy import IProxy - return IProxy.call(signature, interp, s_frame, argcount, s_method) + return IProxy.call(signature, interp, s_frame, argcount, w_method) raise PrimitiveFailedError @expose_primitive(COMPILED_METHOD_FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): if not 
isinstance(w_rcvr, model.W_CompiledMethod): raise PrimitiveFailedError() - s_cm = w_rcvr.as_compiledmethod_get_shadow(interp.space) - w_class = s_cm.w_compiledin + w_class = w_rcvr.compiled_in() if w_class: w_class = assert_pointers(w_class) w_class.as_class_get_shadow(interp.space).flush_method_caches() @@ -1369,18 +1368,18 @@ s_frame.pop_n(2) # removing our arguments try: - s_method = w_rcvr.class_shadow(interp.space).lookup(w_selector) + w_method = w_rcvr.class_shadow(interp.space).lookup(w_selector) except MethodNotFound: return s_frame._doesNotUnderstand(w_selector, argcount, interp, w_rcvr) - code = s_method.primitive() + code = w_method.primitive() if code: s_frame.push_all(args_w) try: - return s_frame._call_primitive(code, interp, argcount, s_method, w_selector) + return s_frame._call_primitive(code, interp, argcount, w_method, w_selector) except PrimitiveFailedError: pass # ignore this error and fall back to the Smalltalk version - s_new_frame = s_method.create_frame(w_rcvr, args_w, s_frame) + s_new_frame = w_method.create_frame(interp.space, w_rcvr, args_w, s_frame) s_frame.pop() return interp.stack_frame(s_new_frame) @@ -1388,12 +1387,10 @@ def func(interp, s_frame, w_rcvr, args_w, w_cm): if not isinstance(w_cm, model.W_CompiledMethod): raise PrimitiveFailedError() - - s_method = w_cm.as_compiledmethod_get_shadow(interp.space) - code = s_method.primitive() + code = w_cm.primitive() if code: raise PrimitiveFailedError("withArgs:executeMethod: not support with primitive method") - s_new_frame = s_method.create_frame(w_rcvr, args_w, s_frame) + s_new_frame = w_cm.create_frame(interp.space, w_rcvr, args_w, s_frame) return interp.stack_frame(s_new_frame) @expose_primitive(SIGNAL, unwrap_spec=[object], clean_stack=False, no_result=True) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -455,9 +455,9 @@ def lookup(self, w_selector): look_in_shadow = self while look_in_shadow is not None: - s_method = look_in_shadow.s_methoddict().find_selector(w_selector) - if s_method is not None: - return s_method + w_method = look_in_shadow.s_methoddict().find_selector(w_selector) + if w_method is not None: + return w_method look_in_shadow = look_in_shadow._s_superclass raise MethodNotFound(self, w_selector) @@ -497,10 +497,9 @@ "NOT_RPYTHON" # this is only for testing. 
assert not isinstance(w_selector, str) self.initialize_methoddict() - s_method = w_method.as_compiledmethod_get_shadow(self.space) - self.s_methoddict().methoddict[w_selector] = s_method + self.s_methoddict().methoddict[w_selector] = w_method if isinstance(w_method, model.W_CompiledMethod): - s_method.w_compiledin = self.w_self() + w_method.w_compiledin = self.w_self() class MethodDictionaryShadow(ListStorageShadow): @@ -566,7 +565,7 @@ "If the value observed is nil, our " "invalidating mechanism may be broken.") selector = self._as_md_entry(w_selector) - self.methoddict[w_selector] = w_compiledmethod.as_compiledmethod_get_shadow(self.space) + self.methoddict[w_selector] = w_compiledmethod w_compiledmethod._likely_methodname = selector if self.s_class: self.s_class.changed() @@ -729,7 +728,7 @@ self.store_pc(-1) else: pc = self.space.unwrap_int(w_pc) - pc -= self.s_method().bytecodeoffset + pc -= self.w_method().bytecodeoffset() pc -= 1 self.store_pc(pc) @@ -739,7 +738,7 @@ return self.space.w_nil else: pc += 1 - pc += self.s_method().bytecodeoffset + pc += self.w_method().bytecodeoffset() return self.space.wrap_int(pc) def pc(self): @@ -770,14 +769,10 @@ assert isinstance(retval, model.W_CompiledMethod) return retval - def s_method(self): - w_method = jit.promote(self.w_method()) - return jit.promote(w_method.as_compiledmethod_get_shadow(self.space)) - def getbytecode(self): jit.promote(self._pc) assert self._pc >= 0 - bytecode = self.s_method().getbytecode(self._pc) + bytecode = self.w_method().getbytecode(self._pc) currentBytecode = ord(bytecode) self._pc += 1 return currentBytecode @@ -960,12 +955,12 @@ def unwrap_store_initialip(self, w_value): initialip = self.space.unwrap_int(w_value) - initialip -= 1 + self.s_method().literalsize + initialip -= 1 + self.w_method().literalsize self.store_initialip(initialip) def wrap_initialip(self): initialip = self.initialip() - initialip += 1 + self.s_method().literalsize + initialip += 1 + self.w_method().literalsize return self.space.wrap_int(initialip) def unwrap_store_eargc(self, w_value): @@ -1024,7 +1019,7 @@ repr_classname = "MethodContextShadow" @jit.unroll_safe - def __init__(self, space, w_self=None, s_method=None, w_receiver=None, + def __init__(self, space, w_self=None, w_method=None, w_receiver=None, arguments=None, s_sender=None, closure=None, pc=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) ContextPartShadow.__init__(self, space, w_self) @@ -1036,10 +1031,10 @@ else: self.w_closure_or_nil = space.w_nil - if s_method: - self.store_w_method(s_method.w_self()) + if w_method: + self.store_w_method(w_method) # The summand is needed, because we calculate i.a. our stackdepth relative of the size of w_self. 
- size = s_method.compute_frame_size() + self.space.w_MethodContext.as_class_get_shadow(self.space).instsize() + size = w_method.compute_frame_size() + self.space.w_MethodContext.as_class_get_shadow(self.space).instsize() self._w_self_size = size self.init_stack_and_temps() else: @@ -1092,7 +1087,7 @@ def tempsize(self): if not self.is_closure_context(): - return self.s_method().tempsize() + return self.w_method().tempsize() else: return wrapper.BlockClosureWrapper(self.space, self.w_closure_or_nil).tempsize() @@ -1177,82 +1172,6 @@ block = '[] of ' if self.is_closure_context() else '' return '%s%s' % (block, self.w_method().get_identifier_string()) -class CompiledMethodShadow(object): - _attrs_ = ["_w_self", "space", "bytecode", - "literals", "bytecodeoffset", - "literalsize", "_tempsize", "_primitive", - "argsize", "islarge", - "w_compiledin", "version"] - _immutable_fields_ = ["version?", "_w_self"] - import_from_mixin(version.VersionMixin) - repr_classname = "CompiledMethodShadow" - - def __init__(self, w_compiledmethod, space): - assert isinstance(w_compiledmethod, model.W_CompiledMethod) - self._w_self = w_compiledmethod - self.space = space - self.update() - - def w_self(self): - return self._w_self - - @constant_for_version - def getliteral(self, index): - return self.literals[index] - - @constant_for_version - def compute_frame_size(self): - # From blue book: normal mc have place for 12 temps+maxstack - # mc for methods with islarge flag turned on 32 - return 16 + self.islarge * 40 + self.argsize - - def getliteralsymbol(self, index): - w_literal = self.getliteral(index) - assert isinstance(w_literal, model.W_BytesObject) - return w_literal.as_string() # XXX performance issue here - - def update(self): - w_compiledmethod = self._w_self - self.changed() - self.bytecode = "".join(w_compiledmethod.bytes) - self.bytecodeoffset = w_compiledmethod.bytecodeoffset() - self.literalsize = w_compiledmethod.getliteralsize() - self._tempsize = w_compiledmethod.gettempsize() - self._primitive = w_compiledmethod.primitive - self.argsize = w_compiledmethod.argsize - self.islarge = w_compiledmethod.islarge - self.literals = w_compiledmethod.literals - - self.w_compiledin = None - if self.literals: - # (Blue book, p 607) All CompiledMethods that contain - # extended-super bytecodes have the clain which they are found as - # their last literal variable. 
- # Last of the literals is an association with compiledin - # as a class - w_association = self.literals[-1] - if isinstance(w_association, model.W_PointersObject) and w_association.size() >= 2: - # XXX XXX XXX where to get a space from here - association = wrapper.AssociationWrapper(self.space, w_association) - self.w_compiledin = association.value() - - @constant_for_version - def tempsize(self): - return self._tempsize - - @constant_for_version - def primitive(self): - return self._primitive - - def create_frame(self, receiver, arguments, sender = None): - assert len(arguments) == self.argsize - return MethodContextShadow(self.space, None, self, receiver, arguments, sender) - - @constant_for_version - def getbytecode(self, pc): - assert pc >= 0 and pc < len(self.bytecode) - return self.bytecode[pc] - class CachedObjectShadow(AbstractCachingShadow): repr_classname = "CachedObjectShadow" diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -61,7 +61,7 @@ for (w_class, primnum, argsize, methname) in methods: s_class = w_class.as_class_get_shadow(space) prim_meth = model.W_CompiledMethod(space, 0) - prim_meth.primitive = primnum + prim_meth._primitive = primnum prim_meth.argsize = argsize symbol = fakesymbol(methname) # somewhat evil: @@ -109,11 +109,11 @@ w_method.islarge = 1 w_method.bytes = bytes w_method.argsize=2 - w_method.tempsize=8 + w_method._tempsize=8 w_method.setliterals([model.W_PointersObject(space, None, 2)]) if receiver is None: receiver = space.w_nil - s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(receiver, [space.w("foo"), space.w("bar")]) + s_frame = w_method.create_frame(space, receiver, [space.w("foo"), space.w("bar")]) return s_frame.w_self(), s_frame def new_frame(bytes, receiver=None): @@ -126,8 +126,8 @@ w_method.bytes="hello" w_method.islarge = 1 w_method.argsize=2 - w_method.tempsize=8 - s_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(w("receiver"), [w("foo"), w("bar")]) + w_method._tempsize=8 + s_frame = w_method.create_frame(space, w("receiver"), [w("foo"), w("bar")]) w_frame = s_frame.w_self() assert s_frame.w_receiver().as_string() == "receiver" assert s_frame.gettemp(0).as_string() == "foo" @@ -449,7 +449,7 @@ assert s_active_context.w_sender() == w_frame assert s_active_context.stack() == [] assert w_active_context.as_methodcontext_get_shadow(space).w_receiver().is_same_object(w_object) - assert w_active_context.as_methodcontext_get_shadow(space).w_method().is_same_object(shadow.s_methoddict().methoddict[w_foo].w_self()) + assert w_active_context.as_methodcontext_get_shadow(space).w_method().is_same_object(shadow.s_methoddict().methoddict[w_foo]) assert s_frame.stack() == [] step_in_interp(s_active_context) w_active_context = step_in_interp(s_active_context) @@ -469,7 +469,7 @@ method.literalsize = 1 method.bytes = bytecode method.argsize = 1 - method.tempsize = 1 + method._tempsize = 1 literals = fakeliterals(space, "fib:") method.setliterals(literals) shadow.installmethod(literals[0], method) @@ -607,7 +607,7 @@ shadow = bootstrap_class(0).as_class_get_shadow(space) w_method = model.W_CompiledMethod(space, 0) w_method.argsize = 1 - w_method.tempsize = 1 + w_method._tempsize = 1 w_method.literalsize = 1 w_symbol = fakesymbol("+") shadow.installmethod(w_symbol, w_method) @@ -619,7 +619,7 @@ s_frame.push(space.w_one) w_active_context = step_in_interp(s_frame) s_active_context = 
w_active_context.as_context_get_shadow(space) - assert w_active_context.as_methodcontext_get_shadow(space).s_method() == shadow.s_methoddict().methoddict[w_symbol] + assert w_active_context.as_methodcontext_get_shadow(space).w_method() == shadow.s_methoddict().methoddict[w_symbol] assert s_active_context.w_receiver() is w_object assert w_active_context.as_methodcontext_get_shadow(space).gettemp(0).is_same_object(space.w_one) assert s_active_context.stack() == [] @@ -680,7 +680,7 @@ assert s_active_context.stack() == [] assert w_active_context.as_methodcontext_get_shadow(space).w_receiver() == w_object meth = w_specificclass.as_class_get_shadow(space).s_methoddict().methoddict[foo] - assert s_active_context.w_method() == meth.w_self() + assert s_active_context.w_method() == meth assert s_caller_context.stack() == [] def test_secondExtendedSendBytecode(): @@ -999,11 +999,11 @@ w_method.islarge = 1 w_method.bytes = bytes w_method.argsize=0 - w_method.tempsize=1 + w_method._tempsize=1 w_method.setliterals([space.wrap_int(11)]) #create a frame for that method - w_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space.wrap_int(0), []).w_self() + w_frame = w_method.create_frame(space, space.wrap_int(0), []).w_self() try: interp.loop(w_frame) except interpreter.ReturnFromTopLevel, e: @@ -1013,7 +1013,7 @@ try: interp = interpreter.Interpreter(space, None, "", max_stack_depth=10) interp._loop = True - interp.c_loop(w_method.as_compiledmethod_get_shadow(space).create_frame(space.wrap_int(0), [])) + interp.c_loop(w_method.create_frame(space, space.wrap_int(0), [])) except interpreter.StackOverflow, e: assert isinstance(e.s_context, shadow.MethodContextShadow) except interpreter.ReturnFromTopLevel, e: @@ -1045,11 +1045,11 @@ w_method.islarge = 1 w_method.bytes = bytes w_method.argsize=0 - w_method.tempsize=1 + w_method._tempsize=1 w_method.setliterals([space.wrap_int(11)]) #create a frame for that method - w_frame = w_method.as_compiledmethod_get_shadow(space).create_frame(space.wrap_int(0), []).w_self() + w_frame = w_method.create_frame(space, space.wrap_int(0), []).w_self() try: interp.loop(w_frame) except interpreter.ReturnFromTopLevel, e: diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -93,9 +93,7 @@ class mockmethod(object): def __init__(self, val): self.val = val - def as_compiledmethod_get_shadow(self, space): - return self.val - w_class = bootstrap_class(mockmethod(0)) + w_class = bootstrap_class(0) shadow = w_class.as_class_get_shadow(space) shadow.installmethod(w_foo, mockmethod(1)) shadow.installmethod(w_bar, mockmethod(2)) @@ -105,11 +103,11 @@ subshadow.installmethod(w_foo, mockmethod(3)) shadow.initialize_methoddict() subshadow.initialize_methoddict() - assert shadow.lookup(w_foo) == 1 - assert shadow.lookup(w_bar) == 2 + assert shadow.lookup(w_foo).val == 1 + assert shadow.lookup(w_bar).val == 2 py.test.raises(MethodNotFound, shadow.lookup, "zork") - assert subshadow.lookup(w_foo) == 3 - assert subshadow.lookup(w_bar) == 2 + assert subshadow.lookup(w_foo).val == 3 + assert subshadow.lookup(w_bar).val == 2 py.test.raises(MethodNotFound, subshadow.lookup, "zork") def test_w_compiledin(): @@ -119,8 +117,26 @@ supershadow.installmethod(w_foo, model.W_CompiledMethod(space, 0)) classshadow = w_class.as_class_get_shadow(space) classshadow.initialize_methoddict() - assert classshadow.lookup(w_foo).w_compiledin is w_super + assert classshadow.lookup(w_foo).compiled_in() is w_super +def 
new_object(size=0): + return model.W_PointersObject(space, None, size) + +def test_w_compiledin_assoc(): + val = new_object() + assoc = new_object(2) + assoc.store(space, 0, new_object()) + assoc.store(space, 1, val) + meth = model.W_CompiledMethod(space, 0) + meth.setliterals([new_object(), new_object(), assoc ]) + assert meth.compiled_in() == val + +def test_w_compiledin_missing(): + meth = model.W_CompiledMethod(space, 0) + meth.w_compiledin = None + meth.setliterals([new_object(), new_object() ]) + assert meth.compiled_in() == None + def test_compiledmethod_setchar(): w_method = model.W_CompiledMethod(space, 3) w_method.setchar(0, "c") diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -80,12 +80,12 @@ methoddict = classshadow.s_methoddict().methoddict assert len(methods) == len(methoddict) for w_key, value in methoddict.items(): - assert methods[w_key.as_string()].as_compiledmethod_get_shadow(space) is value + assert methods[w_key.as_string()] is value def create_method(tempsize=3,argsize=2, bytes="abcde"): w_m = model.W_CompiledMethod(space, ) w_m.bytes = bytes - w_m.tempsize = tempsize + w_m._tempsize = tempsize w_m.argsize = argsize w_m.literalsize = 2 return w_m @@ -96,10 +96,10 @@ w_sender = space.w_nil if method is None: method = create_method() - w_object = model.W_PointersObject(space, space.w_MethodContext, constants.MTHDCTX_TEMP_FRAME_START+method.tempsize+stacksize) + w_object = model.W_PointersObject(space, space.w_MethodContext, constants.MTHDCTX_TEMP_FRAME_START+method.tempsize()+stacksize) w_object.store(space, constants.CTXPART_SENDER_INDEX, w_sender) w_object.store(space, constants.CTXPART_PC_INDEX, space.wrap_int(pc)) - w_object.store(space, constants.CTXPART_STACKP_INDEX, space.wrap_int(method.tempsize+stackpointer)) + w_object.store(space, constants.CTXPART_STACKP_INDEX, space.wrap_int(method.tempsize()+stackpointer)) w_object.store(space, constants.MTHDCTX_METHOD, method) # XXX w_object.store(space, constants.MTHDCTX_CLOSURE_OR_NIL, space.w_nil) @@ -194,27 +194,6 @@ assert w_object.shadow is s_newobject assert s_object.fetch(1).value == 13 -def test_compiledmethodshadow(): - header = joinbits([0,2,0,1,0,0],[9,8,1,6,4,1]) - - w_compiledmethod = model.W_CompiledMethod(space, 3, header) - w_compiledmethod.setbytes(list("abc")) - shadow = w_compiledmethod.as_compiledmethod_get_shadow(space) - assert shadow.bytecode == "abc" - assert shadow.bytecodeoffset == 12 - assert shadow.literalsize == 8 # 12 - 4byte header - assert shadow.tempsize() == 1 - - w_compiledmethod.literalatput0(space, 1, 17) - w_compiledmethod.literalatput0(space, 2, 41) - assert w_compiledmethod._shadow is not None - assert shadow.literals == [17, 41] - - w_compiledmethod.atput0(space, 14, space.wrap_int(ord("x"))) - - assert shadow.bytecode == "abx" - assert shadow is w_compiledmethod.as_compiledmethod_get_shadow(space) - def test_cached_object_shadow(): w_o = space.wrap_list([0, 1, 2, 3, 4, 5, 6, 7]) s_o = w_o.as_cached_object_get_shadow(space) @@ -261,14 +240,14 @@ i = i + 1 key = s_methoddict.w_self().fetch(s_methoddict.space, constants.METHODDICT_NAMES_INDEX+i) - assert (s_class.lookup(key) is foo.as_compiledmethod_get_shadow(space) - or s_class.lookup(key) is bar.as_compiledmethod_get_shadow(space)) + assert (s_class.lookup(key) is foo + or s_class.lookup(key) is bar) # change that entry w_array = s_class.w_methoddict().fetch(s_class.space, constants.METHODDICT_VALUES_INDEX) version = s_class.version 
w_array.atput0(space, i, baz) - assert s_class.lookup(key) is baz.as_compiledmethod_get_shadow(space) + assert s_class.lookup(key) is baz assert version is not s_class.version def test_updating_class_changes_subclasses(): @@ -288,7 +267,7 @@ s_md._w_self.atput0(space, 0, key) w_ary.atput0(space, 0, w_method) - assert s_class.lookup(key) is w_method.as_compiledmethod_get_shadow(space) + assert s_class.lookup(key) is w_method assert s_class.version is not version assert s_class.version is w_parent.as_class_get_shadow(space).version diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -15,16 +15,15 @@ _test_all_pointers_are_valid(reader) _test_lookup_abs_in_integer(interp) -def create_method_shadow(bytes, literals=[], islarge=0, argsize=0, tempsize=0): +def create_method(bytes, literals=[], islarge=0, argsize=0, tempsize=0): w_method = model.W_CompiledMethod(space, len(bytes)) w_method.bytes = bytes w_method.islarge = islarge w_method.argsize = argsize - w_method.tempsize = tempsize + w_method._tempsize = tempsize w_method.setliterals(literals) - s_method = w_method.as_compiledmethod_get_shadow(space) - return s_method + return w_method def test_ensure(): #ensure @@ -38,12 +37,12 @@ ensure_ = find_symbol_in_methoddict_of('ensure:', s_class) assert ensure_ is not None, 'Using image without #ensure:-method.' - s_method = create_method_shadow(bytes, [ensure_, w('b1'), w('b2'), + w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), w('ensure'), space.w_BlockClosure]) # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method_shadow(chr(0x7c)).create_frame(w(0), []) - w_frame = s_method.create_frame(w(0), [], sender=s_initial_frame).w_self() + s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) + w_frame = w_method.create_frame(space, w(0), [], sender=s_initial_frame).w_self() try: interp.loop(w_frame) @@ -64,12 +63,12 @@ ensure_ = find_symbol_in_methoddict_of('ensure:', s_class) assert ensure_ is not None, 'Using image without #ensure:-method.' 
- s_method = create_method_shadow(bytes, [ensure_, w('b1'), w('b2'), + w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), w('ensure'), space.w_BlockClosure]) # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method_shadow(chr(0x7c)).create_frame(w(0), []) - w_frame = s_method.create_frame(w(0), [], sender=s_initial_frame).w_self() + s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) + w_frame = w_method.create_frame(space, w(0), [], sender=s_initial_frame).w_self() try: interp.loop(w_frame) diff --git a/spyvm/tool/analyseimage.py b/spyvm/tool/analyseimage.py --- a/spyvm/tool/analyseimage.py +++ b/spyvm/tool/analyseimage.py @@ -56,7 +56,7 @@ w_method = s_class.lookup("tinyBenchmarks") assert w_method - w_frame = w_method.create_frame(w_object, []) + w_frame = w_method.create_frame(interp.space, w_object, []) interp.store_w_active_context(w_frame) from spyvm.interpreter import BYTECODE_TABLE diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -247,10 +247,10 @@ if not isinstance(w_outerContext, model.W_PointersObject): raise PrimitiveFailedError s_outerContext = w_outerContext.as_context_get_shadow(self.space) - s_method = s_outerContext.w_method().as_compiledmethod_get_shadow(self.space) + w_method = s_outerContext.w_method() w_receiver = s_outerContext.w_receiver() - pc = self.startpc() - s_method.bytecodeoffset - 1 - w_new_frame = shadow.MethodContextShadow(self.space, None, s_method, w_receiver, + pc = self.startpc() - w_method.bytecodeoffset() - 1 + w_new_frame = shadow.MethodContextShadow(self.space, None, w_method, w_receiver, arguments, s_sender=w_context.get_shadow(self.space), closure=self, pc=pc) return w_new_frame diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -108,8 +108,7 @@ w_method = model.W_CompiledMethod(space, header=512) w_method.literalatput0(interp.space, 1, w_selector) w_method.setbytes([chr(131), chr(argcount << 5), chr(124)]) #returnTopFromMethod - s_method = w_method.as_compiledmethod_get_shadow(interp.space) - s_frame = shadow.MethodContextShadow(interp.space, None, s_method, w_receiver, []) + s_frame = shadow.MethodContextShadow(interp.space, None, w_method, w_receiver, []) s_frame.push(w_receiver) if not stringarg == "": s_frame.push(interp.space.wrap_string(stringarg)) diff --git a/targettinybenchsmalltalk.py b/targettinybenchsmalltalk.py --- a/targettinybenchsmalltalk.py +++ b/targettinybenchsmalltalk.py @@ -24,8 +24,8 @@ w_selector = interp.perform(space.wrap_string("loopTest"), "asSymbol") w_object = model.W_SmallInteger(0) s_class = w_object.class_shadow(space) - s_method = s_class.lookup(w_selector) - s_frame = s_method.create_frame(w_object, []) + w_method = s_class.lookup(w_selector) + s_frame = w_method.create_frame(space, w_object, []) return interp, s_frame interp, s_frame = setup() From noreply at buildbot.pypy.org Mon Mar 31 19:40:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Renaming for consistency. Message-ID: <20140331174037.DFECF1C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r729:f207e0a7310b Date: 2014-03-31 19:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f207e0a7310b/ Log: Renaming for consistency. 
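The rename in the diff below follows the naming convention visible throughout these patches: w_* variables hold wrapped Smalltalk objects (model.W_* instances), while s_* variables hold their shadows, so a MethodContextShadow result gets an s_ name. As a condensed, illustrative sketch of the new calling convention exercised by the interpreter tests earlier in this thread (it assumes the test module's space and interp fixtures and a bytecode string named bytes, so it is a sketch rather than a verbatim quote of the repository):

    # Build a minimal compiled method, in the style of the interpreter tests.
    w_method = model.W_CompiledMethod(space, 0)
    w_method.bytes = bytes
    w_method.islarge = 1
    w_method.argsize = 0
    w_method._tempsize = 1
    w_method.setliterals([space.wrap_int(11)])

    # create_frame() now lives on the wrapped W_CompiledMethod itself; it
    # returns a context shadow (hence the s_ prefix), and w_self() yields the
    # wrapped W_PointersObject where one is required.
    s_frame = w_method.create_frame(space, space.wrap_int(0), [])
    w_frame = s_frame.w_self()
    interp.loop(w_frame)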
diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -250,10 +250,10 @@ w_method = s_outerContext.w_method() w_receiver = s_outerContext.w_receiver() pc = self.startpc() - w_method.bytecodeoffset() - 1 - w_new_frame = shadow.MethodContextShadow(self.space, None, w_method, w_receiver, + s_new_frame = shadow.MethodContextShadow(self.space, None, w_method, w_receiver, arguments, s_sender=w_context.get_shadow(self.space), closure=self, pc=pc) - return w_new_frame + return s_new_frame def tempsize(self): # We ignore the number of temps a block has, because the first From noreply at buildbot.pypy.org Mon Mar 31 19:40:39 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:39 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Made methods in ContextPartShadow and subclasses more consistent. Message-ID: <20140331174039.08ADD1C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r730:5497a87c79ff Date: 2014-03-31 19:37 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/5497a87c79ff/ Log: Made methods in ContextPartShadow and subclasses more consistent. Moved abstract methods down into the subclasses, removed useless methods, added comments. Also, fixed a bug in MethodContext.__init__, when activating a closure. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -692,11 +692,12 @@ def as_context_get_shadow(self, space): from spyvm.shadow import ContextPartShadow - # XXX TODO should figure out itself if its method or block context if not isinstance(self.shadow, ContextPartShadow): - if ContextPartShadow.is_block_context(self, space): + if self.getclass(space).is_same_object(space.w_BlockContext): return self.as_blockcontext_get_shadow(space) - return self.as_methodcontext_get_shadow(space) + if self.getclass(space).is_same_object(space.w_MethodContext): + return self.as_methodcontext_get_shadow(space) + raise ValueError("This object cannot be treated like a Context object!") return self.as_special_get_shadow(space, ContextPartShadow) def as_methoddict_get_shadow(self, space): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1283,7 +1283,7 @@ # context of the receiver is used for the new BlockContext. 
# Note that in our impl, MethodContext.w_home == self w_context = assert_pointers(w_context) - w_method_context = w_context.as_context_get_shadow(interp.space).w_home() + w_method_context = w_context.as_context_get_shadow(interp.space).s_home().w_self() # The block bytecodes are stored inline: so we skip past the # byteodes to invoke this primitive to find them (hence +2) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -614,6 +614,9 @@ "_w_self", "_w_self_size" ] + # ______________________________________________________________________ + # Initialization + def __init__(self, space, w_self): self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self) @@ -640,13 +643,10 @@ def fields_to_copy_first(self): return [] - - @staticmethod - def is_block_context(w_pointers, space): - method_or_argc = w_pointers.fetch(space, constants.MTHDCTX_METHOD) - return method_or_argc.getclass(space).is_same_object( - space.w_SmallInteger) - + + # ______________________________________________________________________ + # Accessing object fields + def fetch(self, n0): if n0 == constants.CTXPART_SENDER_INDEX: return self.w_sender() @@ -666,7 +666,12 @@ def store(self, n0, w_value): if n0 == constants.CTXPART_SENDER_INDEX: - return self.store_w_sender(w_value) + assert isinstance(w_value, model.W_PointersObject) + if w_value.is_nil(self.space): + self._s_sender = None + else: + self.store_s_sender(w_value.as_context_get_shadow(self.space)) + return if n0 == constants.CTXPART_PC_INDEX: return self.store_unwrap_pc(w_value) if n0 == constants.CTXPART_STACKP_INDEX: @@ -680,7 +685,24 @@ else: # XXX later should store tail out of known context part as well raise error.WrapperException("Index in context out of bounds") - + + # === Sender === + + def store_s_sender(self, s_sender): + assert s_sender is None or isinstance(s_sender, ContextPartShadow) + self._s_sender = s_sender + raise error.SenderChainManipulation(self) + + def w_sender(self): + if self._s_sender is None: + return self.space.w_nil + return self._s_sender.w_self() + + def s_sender(self): + return self._s_sender + + # === Stack Pointer === + def unwrap_store_stackpointer(self, w_sp1): # the stackpointer in the W_PointersObject starts counting at the # tempframe start @@ -697,48 +719,14 @@ for i in range(depth, size): self.push(self.space.w_nil) + def stackdepth(self): + return rarithmetic.intmask(self._stack_ptr) + def wrap_stackpointer(self): return self.space.wrap_int(self.stackdepth()) - def external_stackpointer(self): - return self.stackdepth() + self.stackstart() - - def w_home(self): - raise NotImplementedError() - - def s_home(self): - return self.w_home().as_methodcontext_get_shadow(self.space) - - def stackstart(self): - raise NotImplementedError() - - def stackpointer_offset(self): - raise NotImplementedError() - - def w_receiver(self): - " Return self of the method, or the method that contains the block " - return self.s_home().w_receiver() - - def store_s_sender(self, s_sender): - assert s_sender is None or isinstance(s_sender, ContextPartShadow) - self._s_sender = s_sender - raise error.SenderChainManipulation(self) - - def store_w_sender(self, w_sender): - assert isinstance(w_sender, model.W_PointersObject) - if w_sender.is_nil(self.space): - self._s_sender = None - else: - self.store_s_sender(w_sender.as_context_get_shadow(self.space)) - - def w_sender(self): - if self._s_sender is None: - return self.space.w_nil - return self._s_sender.w_self() - - def s_sender(self): - return 
self._s_sender - + # === Program Counter === + def store_unwrap_pc(self, w_pc): if w_pc.is_nil(self.space): self.store_pc(-1) @@ -763,10 +751,29 @@ def store_pc(self, newpc): assert newpc >= -1 self._pc = newpc - - def stackpointer_offset(self): + + # === Subclassed accessors === + + def s_home(self): raise NotImplementedError() + def stackstart(self): + raise NotImplementedError() + + def w_receiver(self): + raise NotImplementedError() + + def w_method(self): + raise NotImplementedError() + + def tempsize(self): + raise NotImplementedError() + + def is_closure_context(self): + raise NotImplementedError() + + # === Other properties of Contexts === + def mark_returned(self): self.store_pc(-1) try: @@ -775,16 +782,15 @@ assert self == e.s_context def is_returned(self): - return self.pc() == -1 and self.w_sender.is_nil(self.space) + return self.pc() == -1 and self.w_sender().is_nil(self.space) - # ______________________________________________________________________ - # Method that contains the bytecode for this method/block context - - def w_method(self): - retval = self.s_home().w_method() - assert isinstance(retval, model.W_CompiledMethod) - return retval - + def external_stackpointer(self): + return self.stackdepth() + self.stackstart() + + def stackend(self): + # XXX this is incorrect when there is subclassing + return self._w_self_size + def getbytecode(self): jit.promote(self._pc) assert self._pc >= 0 @@ -792,31 +798,35 @@ currentBytecode = ord(bytecode) self._pc += 1 return currentBytecode - + # ______________________________________________________________________ # Temporary Variables # - # Are always fetched relative to the home method context. + # Every context has it's own stack. BlockContexts share their temps with + # their home contexts. MethodContexts created from a BlockClosure get their + # temps copied from the closure upon activation. Changes are not propagated back; + # this is handled by the compiler by allocating an extra Array for temps. 
def gettemp(self, index): - return self.s_home().gettemp(index) + raise NotImplementedError() def settemp(self, index, w_value): - self.s_home().settemp(index, w_value) + raise NotImplementedError() + + # ______________________________________________________________________ + # Stack Manipulation @jit.unroll_safe def init_stack_and_temps(self): stacksize = self.stackend() - self.stackstart() tempsize = self.tempsize() - self._temps_and_stack = [None] * (stacksize + tempsize) - make_sure_not_resized(self._temps_and_stack) + temps_and_stack = [None] * (stacksize + tempsize) + self._temps_and_stack = temps_and_stack + make_sure_not_resized(temps_and_stack) for i in range(tempsize): - self._temps_and_stack[i] = self.space.w_nil + temps_and_stack[i] = self.space.w_nil self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element - - # ______________________________________________________________________ - # Stack Manipulation - + def stack_get(self, index0): return self._temps_and_stack[index0] @@ -830,8 +840,8 @@ def pop(self): #assert self._stack_ptr > self.tempsize() ptr = jit.promote(self._stack_ptr) - 1 - ret = self._temps_and_stack[ptr] # you get OverflowError if the stack is empty - self._temps_and_stack[ptr] = None + ret = self.stack_get(ptr) # you get OverflowError if the stack is empty + self.stack_put(ptr, None) self._stack_ptr = ptr return ret @@ -867,10 +877,7 @@ while n > 0: n -= 1 self._stack_ptr -= 1 - self._temps_and_stack[self._stack_ptr] = None - - def stackdepth(self): - return rarithmetic.intmask(self._stack_ptr) + self.stack_put(self._stack_ptr, None) @jit.unroll_safe def pop_and_return_n(self, n): @@ -878,29 +885,9 @@ self.pop_n(n) return result - def stackend(self): - # XXX this is incorrect when there is subclassing - return self._w_self_size - - def tempsize(self): - raise NotImplementedError() # ______________________________________________________________________ - # Marriage of Context Shadows with PointerObjects only when required - - def w_self(self): - if self._w_self is not None: - return self._w_self - else: - s_MethodContext = self.space.w_MethodContext.as_class_get_shadow(self.space) - size = self.size() - s_MethodContext.instsize() - space = self.space - w_self = s_MethodContext.new(size) - assert isinstance(w_self, model.W_PointersObject) - w_self.store_shadow(self) - self._w_self = w_self - self._w_self_size = w_self.size() - return w_self - + # Primitive support + def store_instances_array(self, w_class, match_w): # used for primitives 77 & 78 self.instances_w[w_class] = match_w @@ -910,7 +897,7 @@ return self.instances_w.get(w_class, None) # ______________________________________________________________________ - # Debugging printout + # Printing def print_stack(self, method=True): return self.print_padded_stack(method)[1] @@ -930,11 +917,14 @@ _attrs_ = ['_w_home', '_initialip', '_eargc'] repr_classname = "BlockContextShadow" + # === Initialization === + def __init__(self, space, w_self=None, w_home=None, argcnt=0, initialip=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) creating_w_self = w_self is None if creating_w_self: - contextsize = w_home.as_methodcontext_get_shadow(space).myblocksize() + s_home = w_home.as_methodcontext_get_shadow(space) + contextsize = s_home.size() - s_home.tempsize() w_self = model.W_PointersObject(space, space.w_BlockContext, contextsize) ContextPartShadow.__init__(self, space, w_self) if creating_w_self: @@ -949,9 +939,42 @@ def fields_to_copy_first(self): return [ 
constants.BLKCTX_HOME_INDEX ] + # === Implemented accessors === + + def s_home(self): + return self._w_home.as_methodcontext_get_shadow(self.space) + + def stackstart(self): + return constants.BLKCTX_STACK_START + + def tempsize(self): + # A blockcontext doesn't have any temps + return 0 + + def w_receiver(self): + return self.s_home().w_receiver() + + def w_method(self): + retval = self.s_home().w_method() + assert isinstance(retval, model.W_CompiledMethod) + return retval + + def is_closure_context(self): + return True + + # === Temporary variables === + + def gettemp(self, index): + return self.s_home().gettemp(index) + + def settemp(self, index, w_value): + self.s_home().settemp(index, w_value) + + # === Accessing object fields === + def fetch(self, n0): if n0 == constants.BLKCTX_HOME_INDEX: - return self.w_home() + return self._w_home if n0 == constants.BLKCTX_INITIAL_IP_INDEX: return self.wrap_initialip() if n0 == constants.BLKCTX_BLOCK_ARGUMENT_COUNT_INDEX: @@ -968,17 +991,27 @@ return self.unwrap_store_eargc(w_value) else: return ContextPartShadow.store(self, n0, w_value) - + + def store_w_home(self, w_home): + assert isinstance(w_home, model.W_PointersObject) + self._w_home = w_home + def unwrap_store_initialip(self, w_value): initialip = self.space.unwrap_int(w_value) initialip -= 1 + self.w_method().literalsize self.store_initialip(initialip) + def store_initialip(self, initialip): + self._initialip = initialip + def wrap_initialip(self): initialip = self.initialip() initialip += 1 + self.w_method().literalsize return self.space.wrap_int(initialip) + def initialip(self): + return self._initialip + def unwrap_store_eargc(self, w_value): self.store_expected_argument_count(self.space.unwrap_int(w_value)) @@ -991,35 +1024,13 @@ def store_expected_argument_count(self, argc): self._eargc = argc - def initialip(self): - return self._initialip - - def store_initialip(self, initialip): - self._initialip = initialip - - def store_w_home(self, w_home): - assert isinstance(w_home, model.W_PointersObject) - self._w_home = w_home - - def w_home(self): - return self._w_home - + # === Stack Manipulation === + def reset_stack(self): self.pop_n(self.stackdepth()) - def stackstart(self): - return constants.BLKCTX_STACK_START - - def stackpointer_offset(self): - return constants.BLKCTX_STACK_START - - def tempsize(self): - # A blockcontext doesn't have any temps - return 0 - - def is_closure_context(self): - return True - + # === Printing === + def short_str(self): return 'BlockContext of %s (%s) [%d]' % ( self.w_method().get_identifier_string(), @@ -1031,9 +1042,11 @@ return '[] of %s' % self.w_method().get_identifier_string() class MethodContextShadow(ContextPartShadow): - _attrs_ = ['w_closure_or_nil', '_w_receiver', '_w_method'] + _attrs_ = ['closure', '_w_receiver', '_w_method'] repr_classname = "MethodContextShadow" + # === Initialization === + @jit.unroll_safe def __init__(self, space, w_self=None, w_method=None, w_receiver=None, arguments=None, s_sender=None, closure=None, pc=0): @@ -1041,11 +1054,7 @@ ContextPartShadow.__init__(self, space, w_self) self.store_w_receiver(w_receiver) self.store_pc(pc) - - if closure: - self.w_closure_or_nil = closure._w_self - else: - self.w_closure_or_nil = space.w_nil + self.closure = closure if w_method: self.store_w_method(w_method) @@ -1066,18 +1075,26 @@ argc = len(arguments) for i0 in range(argc): self.settemp(i0, arguments[i0]) - if closure is not None: - for i0 in range(closure.size()): - self.settemp(i0+argc, closure.at0(i0)) + else: + argc = 0 + + 
if closure: + for i0 in range(closure.size()): + self.settemp(i0+argc, closure.at0(i0)) def fields_to_copy_first(self): return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] + # === Accessing object fields === + def fetch(self, n0): if n0 == constants.MTHDCTX_METHOD: return self.w_method() if n0 == constants.MTHDCTX_CLOSURE_OR_NIL: - return self.w_closure_or_nil + if self.closure: + return self.closure._w_self + else: + return self.space.w_nil if n0 == constants.MTHDCTX_RECEIVER: return self.w_receiver() temp_i = n0-constants.MTHDCTX_TEMP_FRAME_START @@ -1090,7 +1107,10 @@ if n0 == constants.MTHDCTX_METHOD: return self.store_w_method(w_value) if n0 == constants.MTHDCTX_CLOSURE_OR_NIL: - self.w_closure_or_nil = w_value + if w_value.is_nil(self.space): + self.closure = None + else: + self.closure = wrapper.BlockClosureWrapper(self.space, w_value) return if n0 == constants.MTHDCTX_RECEIVER: self.store_w_receiver(w_value) @@ -1100,63 +1120,75 @@ return self.settemp(temp_i, w_value) else: return ContextPartShadow.store(self, n0, w_value) - - def tempsize(self): - if not self.is_closure_context(): - return self.w_method().tempsize() + + def store_w_receiver(self, w_receiver): + self._w_receiver = w_receiver + + # === Implemented Accessors === + + def s_home(self): + if self.is_closure_context(): + # this is a context for a blockClosure + w_outerContext = self.closure.outerContext() + assert isinstance(w_outerContext, model.W_PointersObject) + s_outerContext = w_outerContext.as_context_get_shadow(self.space) + # XXX check whether we can actually return from that context + if s_outerContext.is_returned(): + raise error.BlockCannotReturnError() + return s_outerContext.s_home() else: - return wrapper.BlockClosureWrapper(self.space, - self.w_closure_or_nil).tempsize() - - def w_method(self): - retval = self._w_method - assert isinstance(retval, model.W_CompiledMethod) - return retval - + return self + + def stackstart(self): + return constants.MTHDCTX_TEMP_FRAME_START + def store_w_method(self, w_method): assert isinstance(w_method, model.W_CompiledMethod) self._w_method = w_method def w_receiver(self): return self._w_receiver + + def w_method(self): + retval = self._w_method + assert isinstance(retval, model.W_CompiledMethod) + return retval + + def tempsize(self): + if not self.is_closure_context(): + return self.w_method().tempsize() + else: + return self.closure.tempsize() + + def is_closure_context(self): + return self.closure is not None + + # ______________________________________________________________________ + # Marriage of MethodContextShadows with PointerObjects only when required - def store_w_receiver(self, w_receiver): - self._w_receiver = w_receiver - + def w_self(self): + if self._w_self is not None: + return self._w_self + else: + s_MethodContext = self.space.w_MethodContext.as_class_get_shadow(self.space) + size = self.size() - s_MethodContext.instsize() + space = self.space + w_self = s_MethodContext.new(size) + assert isinstance(w_self, model.W_PointersObject) + w_self.store_shadow(self) + self._w_self = w_self + self._w_self_size = w_self.size() + return w_self + + # === Temporary variables === + def gettemp(self, index0): return self.stack_get(index0) def settemp(self, index0, w_value): self.stack_put(index0, w_value) - def w_home(self): - return self.w_self() - - def s_home(self): - if self.is_closure_context(): - # this is a context for a blockClosure - w_outerContext = self.w_closure_or_nil.fetch(self.space, - constants.BLKCLSR_OUTER_CONTEXT) - assert 
isinstance(w_outerContext, model.W_PointersObject) - s_outerContext = w_outerContext.as_context_get_shadow(self.space) - # XXX check whether we can actually return from that context - if s_outerContext.pc() == -1: - raise error.BlockCannotReturnError() - return s_outerContext.s_home() - else: - return self - - def stackpointer_offset(self): - return constants.MTHDCTX_TEMP_FRAME_START - - def stackstart(self): - return constants.MTHDCTX_TEMP_FRAME_START - - def myblocksize(self): - return self.size() - self.tempsize() - - def is_closure_context(self): - return not self.w_closure_or_nil.is_nil(self.space) + # === Printing === def __str__(self): retval = '\nMethodContext of:' diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -602,7 +602,7 @@ def test_primitive_closure_value(): s_initial_context, closure, s_new_context = build_up_closure_environment([]) - assert s_new_context.w_closure_or_nil is closure + assert s_new_context.closure._w_self is closure assert s_new_context.s_sender() is s_initial_context assert s_new_context.w_receiver().is_nil(space) @@ -610,7 +610,7 @@ s_initial_context, closure, s_new_context = build_up_closure_environment([ wrap("first arg"), wrap("second arg")]) - assert s_new_context.w_closure_or_nil is closure + assert s_new_context.closure._w_self is closure assert s_new_context.s_sender() is s_initial_context assert s_new_context.w_receiver().is_nil(space) assert s_new_context.gettemp(0).as_string() == "first arg" @@ -621,7 +621,7 @@ [wrap("first arg"), wrap("second arg")], copiedValues=[wrap('some value')]) - assert s_new_context.w_closure_or_nil is closure + assert s_new_context.closure._w_self is closure assert s_new_context.s_sender() is s_initial_context assert s_new_context.w_receiver().is_nil(space) assert s_new_context.gettemp(0).as_string() == "first arg" From noreply at buildbot.pypy.org Mon Mar 31 19:40:33 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:33 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Made become() more consistent (implemented for more types). Message-ID: <20140331174033.5F5FC1C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r725:4e34de85beee Date: 2014-03-31 14:08 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4e34de85beee/ Log: Made become() more consistent (implemented for more types). Removed _immutable_fields from ByteObject and WordsObject (not immutable due to become). Merged W_AbstractPointersObject and subclasses into a single W_PointersObject. This allowed for become() with a weak and a non-weak object. 
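A usage-level sketch of the behaviour described in the log above, using only the constructor and accessors introduced in the diff that follows; space stands for an object space from a loaded image and is assumed here, as in the tests:

    # After the merge, weak and non-weak objects are both W_PointersObject
    # instances that differ only in their storage shadow, so can_become()
    # accepts the pair and become() performs the two-way swap.
    w_strong = model.W_PointersObject(space, None, 3)
    w_weak = model.W_PointersObject(space, None, 3, weak=True)
    assert w_strong.become(w_weak)   # swaps shadows, class references and hashes
    assert w_strong.is_weak()        # w_strong now carries the weak storage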
diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -365,7 +365,7 @@ @expose_on_virtual_machine_proxy([oop], bool) def isWeak(w_object): - return isinstance(w_object, model.W_WeakPointersObject) + return isinstance(w_object, model.W_PointersObject) and w_object.is_weak() @expose_on_virtual_machine_proxy([oop], bool) def isWords(w_object): diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -284,7 +284,21 @@ def invariant(self): return isinstance(self.hash, int) + def become(self, w_other): + if not self.can_become(w_other): + return False + if self.is_same_object(w_other): + return False + self._become(w_other) + return True + + def can_become(self, w_other): + # TODO -- what about become: with a Float and a CompiledMethod etc.? + # We might be in trouble regarding W_LargePositiveInteger1Word, too. + return self.__class__ is w_other.__class__ + def _become(self, w_other): + assert isinstance(w_other, W_AbstractObjectWithIdentityHash) self.hash, w_other.hash = w_other.hash, self.hash class W_LargePositiveInteger1Word(W_AbstractObjectWithIdentityHash): @@ -370,6 +384,12 @@ def is_array_object(self): return True + + def _become(self, w_other): + assert isinstance(w_other, W_LargePositiveInteger1Word) + self.value, w_other.value = w_other.value, self.value + self._exposed_size, w_other._exposed_size = w_other._exposed_size, self._exposed_size + W_AbstractObjectWithIdentityHash._become(self, w_other) class W_Float(W_AbstractObjectWithIdentityHash): """Boxed float value.""" @@ -409,7 +429,7 @@ return isinstance(self.value, float) def _become(self, w_other): - # TODO -- shouldn't this be named 'become'? + assert isinstance(w_other, W_Float) self.value, w_other.value = w_other.value, self.value W_AbstractObjectWithIdentityHash._become(self, w_other) @@ -509,6 +529,7 @@ isinstance(self.w_class.shadow, shadow.ClassShadow)) def _become(self, w_other): + assert isinstance(w_other, W_AbstractObjectWithClassReference) self.w_class, w_other.w_class = w_other.w_class, self.w_class W_AbstractObjectWithIdentityHash._become(self, w_other) @@ -523,37 +544,45 @@ assert w_class is not None return w_class.as_class_get_shadow(space) -class W_AbstractPointersObject(W_AbstractObjectWithClassReference): +class W_PointersObject(W_AbstractObjectWithClassReference): """Common object.""" _attrs_ = ['shadow'] shadow = None - repr_classname = "W_AbstractPointersObject" + repr_classname = "W_PointersObject" log_storage = storage_statistics.log @jit.unroll_safe - def __init__(self, space, w_class, size): + def __init__(self, space, w_class, size, weak=False): """Create new object with size = fixed + variable size.""" W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self.initialize_storage(space, size) - - def initialize_storage(self, space, size): - self.store_shadow(self.empty_storage(space, size)) + self.initialize_storage(space, size, weak) + + def initialize_storage(self, space, size, weak=False): + if weak: + from spyvm.shadow import WeakListStorageShadow + storage = WeakListStorageShadow(space, self, size) + else: + from spyvm.shadow import AllNilStorageShadow + storage = AllNilStorageShadow(space, self, size) + self.store_shadow(storage) self.log_storage("Initialized") - + def fillin(self, space, g_self): W_AbstractObjectWithClassReference.fillin(self, space, g_self) # Recursive fillin required to enable specialized storage strategies. 
for g_obj in g_self.pointers: g_obj.fillin(space) pointers = g_self.get_pointers() - self.store_shadow(self.storage_for_list(space, pointers)) + # TODO -- Also handle weak objects loaded from images. + from spyvm.shadow import find_storage_for_objects + storage = find_storage_for_objects(space, pointers)(space, self, len(pointers)) + self.store_shadow(storage) self.store_all(space, pointers) self.log_storage("Filledin", log_classname=False) - - def empty_storage(self, space, size): - raise NotImplementedError() - def storage_for_list(self, space, vars): - raise NotImplementedError() + + def is_weak(self): + from shadow import WeakListStorageShadow + return isinstance(self.shadow, WeakListStorageShadow) def assert_shadow(self): # Failing the following assert most likely indicates a bug. The shadow can only be absent during @@ -591,9 +620,9 @@ if self.has_shadow(): shadow_info = self.shadow.__repr__() if self.shadow.provides_getname: - name = self._get_shadow().getname() - return '(%s) len=%d [%s]' % (shadow_info, self.size(), name) - + name = " [%s]" % self._get_shadow().getname() + return '(%s) len=%d%s' % (shadow_info, self.size(), name) + def fetch_all(self, space): return [self.fetch(space, i) for i in range(self.size())] @@ -689,15 +718,13 @@ def has_shadow(self): return self._get_shadow() is not None - def become(self, w_other): - if not isinstance(w_other, W_AbstractPointersObject): - return False + def _become(self, w_other): + assert isinstance(w_other, W_PointersObject) self.shadow, w_other.shadow = w_other.shadow, self.shadow # shadow links are in both directions -> also update shadows if self.shadow is not None: self.shadow._w_self = self if w_other.shadow is not None: w_other.shadow._w_self = w_other W_AbstractObjectWithClassReference._become(self, w_other) - return True @jit.unroll_safe def clone(self, space): @@ -706,33 +733,8 @@ w_result.store_all(space, my_pointers) return w_result -class W_PointersObject(W_AbstractPointersObject): - repr_classname = 'W_PointersObject' - - def empty_storage(self, space, size): - # A newly allocated object contains only nils. 
- from spyvm.shadow import AllNilStorageShadow - return AllNilStorageShadow(space, self, size) - - def storage_for_list(self, space, vars): - #if not self.class_shadow(space).isvariable(): - # return ListStorageShadow(space, self, len(vars)) - from spyvm.shadow import find_storage_for_objects - return find_storage_for_objects(space, vars)(space, self, len(vars)) - -class W_WeakPointersObject(W_AbstractPointersObject): - repr_classname = 'W_WeakPointersObject' - - def empty_storage(self, space, size): - from spyvm.shadow import WeakListStorageShadow - return WeakListStorageShadow(space, self, size) - def storage_for_list(self, space, vars): - from spyvm.shadow import WeakListStorageShadow - return WeakListStorageShadow(space, self, len(vars)) - class W_BytesObject(W_AbstractObjectWithClassReference): _attrs_ = ['bytes', 'c_bytes', '_size'] - _immutable_fields_ = ['_size', 'bytes[*]?'] repr_classname = 'W_BytesObject' bytes_per_slot = 1 @@ -859,13 +861,19 @@ self.bytes = None return c_bytes + def _become(self, w_other): + assert isinstance(w_other, W_BytesObject) + self.bytes, w_other.bytes = w_other.bytes, self.bytes + self.c_bytes, w_other.c_bytes = w_other.c_bytes, self.c_bytes + self._size, w_other._size = w_other._size, self._size + W_AbstractObjectWithClassReference._become(self, w_other) + def __del__(self): if self.bytes is None: rffi.free_charp(self.c_bytes) class W_WordsObject(W_AbstractObjectWithClassReference): _attrs_ = ['words', 'c_words', '_size'] - _immutable_fields_ = ['_size'] repr_classname = "W_WordsObject" def __init__(self, space, w_class, size): @@ -974,12 +982,18 @@ w_display_bitmap.setword(idx, self.getword(idx)) w_form.store(interp.space, 0, w_display_bitmap) return w_display_bitmap - + + def _become(self, w_other): + assert isinstance(w_other, W_WordsObject) + self.words, w_other.words = w_other.words, self.words + self.c_words, w_other.c_words = w_other.c_words, self.c_words + self._size, w_other._size = w_other._size, self._size + W_AbstractObjectWithClassReference._become(self, w_other) + def __del__(self): if self.words is None: lltype.free(self.c_words, flavor='raw') - class W_DisplayBitmap(W_AbstractObjectWithClassReference): _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] _immutable_fields_ = ['_realsize', 'display', '_depth'] @@ -1051,6 +1065,10 @@ def convert_to_c_layout(self): return self._real_depth_buffer + def can_become(self, w_other): + # TODO - implement _become() for this class. Impossible due to _immutable_fields_? + return False + def __del__(self): lltype.free(self._real_depth_buffer, flavor='raw') @@ -1133,7 +1151,7 @@ "bytes", "literals", # Additional info about the method "_likely_methodname", "w_compiledin" ] - + ### Extension from Squeak 3.9 doc, which we do not implement: ### trailer (variable) ### The trailer has two variant formats. 
In the first variant, the last @@ -1297,12 +1315,11 @@ index0 = index0 - self.bytecodeoffset() assert index0 < len(self.bytes) self.setchar(index0, chr(space.unwrap_int(w_value))) - + # === Misc === - def become(self, w_other): - if not isinstance(w_other, W_CompiledMethod): - return False + def _become(self, w_other): + assert isinstance(w_other, W_CompiledMethod) self.argsize, w_other.argsize = w_other.argsize, self.argsize self._primitive, w_other._primitive = w_other._primitive, self._primitive self.literals, w_other.literals = w_other.literals, self.literals @@ -1311,10 +1328,11 @@ self.header, w_other.header = w_other.header, self.header self.literalsize, w_other.literalsize = w_other.literalsize, self.literalsize self.islarge, w_other.islarge = w_other.islarge, self.islarge + self._likely_methodname, w_other._likely_methodname = w_other._likely_methodname, self._likely_methodname + self.w_compiledin, w_other.w_compiledin = w_other.w_compiledin, self.w_compiledin W_AbstractObjectWithIdentityHash._become(self, w_other) self.changed() w_other.changed() - return True def clone(self, space): copy = W_CompiledMethod(space, 0, self.getheader()) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -19,7 +19,7 @@ def __init__(self, space, w_self): self.space = space - assert w_self is None or isinstance(w_self, model.W_AbstractPointersObject) + assert w_self is None or isinstance(w_self, model.W_PointersObject) self._w_self = w_self def w_self(self): return self._w_self @@ -401,7 +401,7 @@ w_new = model.W_BytesObject(self.space, w_cls, extrasize) elif self.instance_kind == WEAK_POINTERS: size = self.instsize() + extrasize - w_new = model.W_WeakPointersObject(self.space, w_cls, size) + w_new = model.W_PointersObject(self.space, w_cls, size, weak=True) else: raise NotImplementedError(self.instance_kind) return w_new diff --git a/spyvm/test/jittest/test_strategies.py b/spyvm/test/jittest/test_strategies.py --- a/spyvm/test/jittest/test_strategies.py +++ b/spyvm/test/jittest/test_strategies.py @@ -34,9 +34,9 @@ setarrayitem_gc(p78, 1, p195, descr=), guard_class(p193, 18294904, descr=), p196 = getfield_gc(p193, descr=), - p197 = getfield_gc(p196, descr=), + p197 = getfield_gc(p196, descr=), guard_value(p197, ConstPtr(ptr117), descr=), - p198 = getfield_gc(p193, descr=), + p198 = getfield_gc(p193, descr=), setarrayitem_gc(p78, 0, ConstPtr(null), descr=), setfield_gc(p66, 0, descr=), setfield_gc(ConstPtr(ptr80), i131, descr=), @@ -47,9 +47,9 @@ p203 = getarrayitem_gc(p201, 0, descr=), guard_class(p203, 18294904, descr=), p204 = getfield_gc(p203, descr=), - p205 = getfield_gc(p204, descr=), + p205 = getfield_gc(p204, descr=), guard_value(p205, ConstPtr(ptr149), descr=), - p206 = getfield_gc(p203, descr=), + p206 = getfield_gc(p203, descr=), guard_nonnull_class(p206, 18300088, descr=), p207 = getfield_gc_pure(p206, descr=), i208 = arraylen_gc(p207, descr=), From noreply at buildbot.pypy.org Mon Mar 31 19:40:34 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:34 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added flag to disable optimizing storage strategies. Message-ID: <20140331174034.91C3B1C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r726:3e91a15babf5 Date: 2014-03-31 14:13 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3e91a15babf5/ Log: Added flag to disable optimizing storage strategies. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -558,13 +558,8 @@ self.initialize_storage(space, size, weak) def initialize_storage(self, space, size, weak=False): - if weak: - from spyvm.shadow import WeakListStorageShadow - storage = WeakListStorageShadow(space, self, size) - else: - from spyvm.shadow import AllNilStorageShadow - storage = AllNilStorageShadow(space, self, size) - self.store_shadow(storage) + from spyvm.shadow import empty_storage + self.store_shadow(empty_storage(space, size, weak)(space, self, size)) self.log_storage("Initialized") def fillin(self, space, g_self): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -8,6 +8,10 @@ from rpython.rlib.rstruct.runpack import runpack from rpython.rtyper.lltypesystem import rffi, lltype +# If this is True, then no optimizing storage strategies will be used. +# Intended for performance comparisons. Breaks tests. +no_specialized_storage = False + class AbstractShadow(object): """A shadow is an optional extra bit of information that can be attached at run-time to any Smalltalk object. @@ -158,7 +162,18 @@ def unwrap(space, w_val): return space.unwrap_float(w_val) +def empty_storage(space, size, weak=False): + if weak: + return WeakListStorageShadow + else: + if no_specialized_storage: + return ListStorageShadow + else: + return AllNilStorageShadow + def find_storage_for_objects(space, vars): + if no_specialized_storage: + return ListStorageShadow specialized_strategies = 3 all_nil_can_handle = True small_int_can_handle = True From noreply at buildbot.pypy.org Mon Mar 31 19:40:35 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 31 Mar 2014 19:40:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Putting a '%' char on the console after filling in a w_object when loading the image. Message-ID: <20140331174035.AB1B81C14E8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r727:ccc2cb222452 Date: 2014-03-31 14:16 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ccc2cb222452/ Log: Putting a '%' char on the console after filling in a w_object when loading the image. 
diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -307,9 +307,15 @@ return special[index] def fillin_w_objects(self): + self.filledin_objects = 0 for chunk in self.chunks.itervalues(): chunk.g_object.fillin(self.space) + def print_object_filledin(self): + self.filledin_objects = self.filledin_objects + 1 + if self.filledin_objects % 1000 == 0: + os.write(2,'%') + def init_compactclassesarray(self): """ from the blue book (CompiledMethod Symbol Array PseudoContext LargePositiveInteger nil MethodDictionary Association Point Rectangle nil TranslatedMethod BlockContext MethodContext nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil ) """ special = self.chunks[self.specialobjectspointer] @@ -571,6 +577,7 @@ if not self.filled_in: self.filled_in = True self.w_object.fillin(space, self) + self.reader.print_object_filledin() def get_g_pointers(self): assert self.pointers is not None From noreply at buildbot.pypy.org Mon Mar 31 20:05:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 20:05:52 +0200 (CEST) Subject: [pypy-commit] pypy default: give up earlier on win32 Message-ID: <20140331180552.8563F1D2873@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70357:40c8ce233196 Date: 2014-03-31 20:41 +0300 http://bitbucket.org/pypy/pypy/changeset/40c8ce233196/ Log: give up earlier on win32 diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -17,6 +17,8 @@ exe_name_in_archive = 'bin/pypy' pypy_c = py.path.local(pypydir).join('goal', basename) if not pypy_c.check(): + if sys.platform == 'win32': + assert False, "test on win32 requires exe" pypy_c.write("#!/bin/sh") pypy_c.chmod(0755) fake_pypy_c = True @@ -81,6 +83,8 @@ package.USE_ZIPFILE_MODULE = prev def test_fix_permissions(tmpdir): + if sys.platform == 'win32': + py.test.skip('needs to be more general for windows') def check(f, mode): assert f.stat().mode & 0777 == mode # From noreply at buildbot.pypy.org Mon Mar 31 20:05:54 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 20:05:54 +0200 (CEST) Subject: [pypy-commit] pypy default: add hased dir for _ctypes_test, create fewer /tmp/tmp* directories Message-ID: <20140331180554.1473B1D2873@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70358:d5a24ea7028a Date: 2014-03-31 20:53 +0300 http://bitbucket.org/pypy/pypy/changeset/d5a24ea7028a/ Log: add hased dir for _ctypes_test, create fewer /tmp/tmp* directories diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,3 +1,5 @@ +import imp, os + try: import cpyext except ImportError: @@ -10,4 +12,12 @@ pass # obscure condition of _ctypes_test.py being imported by py.test else: import _pypy_testcapi - _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') + cfile = '_ctypes_test.c' + thisdir = os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) + imp.load_module('_ctypes_test', fp, filename, description) + except ImportError: + print 'could not find _ctypes_test in',output_dir + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- 
a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,22 @@ import os, sys, imp -import tempfile +import tempfile, binascii + +def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], content]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + def _get_c_extension_suffix(): for ext, mod, typ in imp.get_suffixes(): diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,33 +1,17 @@ -import sys, tempfile, imp, binascii, os +import imp, os try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -def get_hashed_dir(cfile): - with open(cfile,'r') as fid: - content = fid.read() - # from cffi's Verifier() - key = '\x00'.join([sys.version[:3], content]) - if sys.version_info >= (3,): - key = key.encode('utf-8') - k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) - k1 = k1.lstrip('0x').rstrip('L') - k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) - k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) - if not os.path.exists(output_dir): - os.mkdir(output_dir) - return output_dir - +import _pypy_testcapi cfile = '_testcapimodule.c' thisdir = os.path.dirname(__file__) -output_dir = get_hashed_dir(os.path.join(thisdir, cfile)) +output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) imp.load_module('_testcapi', fp, filename, description) except ImportError: - import _pypy_testcapi _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/pypy/module/test_lib_pypy/test_testcapi.py b/pypy/module/test_lib_pypy/test_testcapi.py --- a/pypy/module/test_lib_pypy/test_testcapi.py +++ b/pypy/module/test_lib_pypy/test_testcapi.py @@ -8,7 +8,7 @@ def test_get_hashed_dir(): import sys # This should not compile _testcapi, so the output is empty - script = "import _testcapi; assert 'get_hashed_dir' in dir(_testcapi)" + script = "import _testcapi; assert 'get_hashed_dir' not in dir(_testcapi)" output = py.process.cmdexec('''"%s" -c "%s"''' % (sys.executable, script)) assert output == '' From noreply at buildbot.pypy.org Mon Mar 31 20:05:55 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 20:05:55 +0200 (CEST) Subject: [pypy-commit] pypy default: package missing *.c files Message-ID: <20140331180555.67E6D1D2873@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r70359:a15406e0d0f0 Date: 2014-03-31 21:01 +0300 http://bitbucket.org/pypy/pypy/changeset/a15406e0d0f0/ Log: package missing *.c files diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -151,6 +151,9 @@ '*.c', '*.o')) for file in ['LICENSE', 'README.rst']: shutil.copy(str(basedir.join(file)), str(pypydir)) + for file in ['_testcapimodule.c', '_ctypes_test.c']: + shutil.copyfile(str(basedir.join('lib_pypy', file)), + str(pypydir.join('lib_pypy', file))) # spdir = pypydir.ensure('site-packages', dir=True) 
shutil.copy(str(basedir.join('site-packages', 'README')), str(spdir)) From noreply at buildbot.pypy.org Mon Mar 31 22:43:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 22:43:18 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: merge default into branch Message-ID: <20140331204318.980301D29F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70360:6d0a69f7f620 Date: 2014-03-31 22:56 +0300 http://bitbucket.org/pypy/pypy/changeset/6d0a69f7f620/ Log: merge default into branch diff too long, truncating to 2000 out of 9291 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -64,6 +64,7 @@ ^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ +^pypy/goal/.+\.lib$ ^pypy/_cache$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -51,16 +51,22 @@ self.config = config self.logfile = logfile # preferably line buffered - def write_log_entry(self, testpath, lettercode, longrepr): + def write_log_entry(self, testpath, lettercode, longrepr, sections=[]): py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) + for key, text in sections: + py.builtin.print_(" ", file=self.logfile) + py.builtin.print_(" -------------------- %s --------------------" + % key.rstrip(), file=self.logfile) + py.builtin.print_(" %s" % (text.rstrip().replace('\n', '\n '),), + file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) if testpath is None: testpath = report.fspath - self.write_log_entry(testpath, lettercode, longrepr) + self.write_log_entry(testpath, lettercode, longrepr, report.sections) def pytest_runtest_logreport(self, report): if report.when != "call" and report.passed: diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py --- a/lib-python/2.7/test/test_file.py +++ b/lib-python/2.7/test/test_file.py @@ -301,6 +301,7 @@ self.fail("readlines() after next() with empty buffer " "failed. 
Got %r, expected %r" % (line, testline)) # Reading after iteration hit EOF shouldn't hurt either + f.close() f = self.open(TESTFN, 'rb') try: for line in f: diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -231,9 +231,14 @@ unicwd = u'\xe7w\xf0' try: fsencoding = test_support.TESTFN_ENCODING or "ascii" - unicwd.encode(fsencoding) + asciival = unicwd.encode(fsencoding) + if fsencoding == "mbcs": + # http://bugs.python.org/issue850997 + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): - # FS encoding is probably ASCII + # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass else: with test_support.temp_cwd(unicwd): diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,6 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) + f.close() def test_head(self): response = self.request( diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -171,7 +171,7 @@ # very inconsisten on CPython. In PyPy, memoryview supports # the buffer interface, and thus the following comparison # succeeds. See also the comment in - # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer # # Comparison with objects which don't support the buffer API self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,7 @@ if restype is None: import ctypes restype = ctypes.c_int + self._argtypes_ = argsl self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,3 +1,5 @@ +import imp, os + try: import cpyext except ImportError: @@ -10,4 +12,12 @@ pass # obscure condition of _ctypes_test.py being imported by py.test else: import _pypy_testcapi - _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') + cfile = '_ctypes_test.c' + thisdir = os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) + imp.load_module('_ctypes_test', fp, filename, description) + except ImportError: + print 'could not find _ctypes_test in',output_dir + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,22 @@ import os, sys, imp -import tempfile +import tempfile, binascii + +def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], content]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = 
k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + def _get_c_extension_suffix(): for ext, mod, typ in imp.get_suffixes(): @@ -7,12 +24,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. """ thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -38,6 +38,7 @@ if sys.version_info[0] >= 3: StandardError = Exception + cmp = lambda x, y: (x > y) - (x < y) long = int xrange = range basestring = unicode = str diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,17 @@ +import imp, os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: - import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + +import _pypy_testcapi +cfile = '_testcapimodule.c' +thisdir = os.path.dirname(__file__) +output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + imp.load_module('_testcapi', fp, filename, description) +except ImportError: + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -350,7 +350,7 @@ r_sample = getsample(cp, size, i + 1) sample = (l_sample * fac1) + (r_sample * fac2) - sample = clip(sample) + sample = int(clip(sample)) _put_sample(result, size, i // 2, sample) @@ -501,7 +501,7 @@ # slice off extra bytes trim_index = (out_i * bytes_per_frame) - len(retval) - retval = _buffer(retval)[:trim_index] + retval = retval[:trim_index] return (retval, (d, tuple(samps))) diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -292,6 +292,10 @@ depending on the compiler settings, the default of 768KB is enough for about 1400 calls. +* since the implementation of dictionary is different, the exact number + which ``__hash__`` and ``__eq__`` are called is different. Since CPython + does not give any specific guarantees either, don't rely on it. + * assignment to ``__class__`` is limited to the cases where it works on CPython 2.5. On CPython 2.6 and 2.7 it works in a bit more cases, which are not supported by PyPy so far. (If needed, diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. 
_`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: http://codespeak.net/pypy/extradoc/talk/ep2006/pypy3000.txt -.. _`What can PyPy do for you`: http://codespeak.net/pypy/extradoc/talk/ep2006/usecases-slides.html -.. _`PyPy introduction at EuroPython 2006`: http://codespeak.net/pypy/extradoc/talk/ep2006/intro.pdf -.. _`PyPy - the new Python implementation on the block`: http://codespeak.net/pypy/extradoc/talk/22c3/hpk-tech.html -.. _`PyPy development method`: http://codespeak.net/pypy/extradoc/talk/pycon2006/method_talk.html -.. _`PyPy intro`: http://codespeak.net/pypy/extradoc/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: http://codespeak.net/pypy/extradoc/talk/oscon2003-paper.html -.. _`Architecture introduction slides`: http://codespeak.net/pypy/extradoc/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: http://codespeak.net/pypy/extradoc/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: http://codespeak.net/pypy/extradoc/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: http://codespeak.net/pypy/extradoc/talk/pypy-talk-pycon2005/README.html -.. _`Trouble in Paradise`: http://codespeak.net/pypy/extradoc/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: http://codespeak.net/pypy/extradoc/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf -.. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html +.. _`PyPy 3000`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. _`What can PyPy do for you`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt +.. _`PyPy introduction at EuroPython 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt +.. _`PyPy intro`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bitbucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt +.. _`Architecture introduction slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. _`py lib slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt +.. _`Trouble in Paradise`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. 
_`Open Source, EU-Funding and Agile Methods`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html +.. _`PyPy's VM Approach`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -187,7 +187,7 @@ No, we found no way of doing that. The JIT generates machine code containing a large number of constant addresses --- constant at the time -the machine code is written. The vast majority is probably not at all +the machine code is generated. The vast majority is probably not at all constants that you find in the executable, with a nice link name. E.g. the addresses of Python classes are used all the time, but Python classes don't come statically from the executable; they are created anew @@ -212,12 +212,16 @@ garbage collection, implementation of various things like arbitrarily long integers, etc. -Currently, we have preliminary versions of a JavaScript interpreter -(Leonardo Santagada as his Summer of PyPy project), a `Prolog interpreter`_ -(Carl Friedrich Bolz as his Bachelor thesis), and a `SmallTalk interpreter`_ +Currently, we have `Topaz`_, a Ruby interpreter; `Hippy`_, a PHP +interpreter; preliminary versions of a `JavaScript interpreter`_ +(Leonardo Santagada as his Summer of PyPy project); a `Prolog interpreter`_ +(Carl Friedrich Bolz as his Bachelor thesis); and a `SmallTalk interpreter`_ (produced during a sprint). On the `PyPy bitbucket page`_ there is also a Scheme and an Io implementation; both of these are unfinished at the moment. +.. _`Topaz`: http://topazruby.com/ +.. _`Hippy`: http://morepypy.blogspot.ch/2012/07/hello-everyone.html +.. _`JavaScript interpreter`: https://bitbucket.org/pypy/lang-js/ .. _`Prolog interpreter`: https://bitbucket.org/cfbolz/pyrolog/ .. _`SmallTalk interpreter`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`PyPy bitbucket page`: https://bitbucket.org/pypy/ diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -14,7 +14,7 @@ The present document describes the specific garbage collectors that we wrote in our framework. -.. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU-report on this topic`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf Garbage collectors currently written for the GC framework diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -389,7 +389,7 @@ .. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html -.. _`py library`: http://pylib.org +.. _`py library`: http://pylib.readthedocs.org/ .. 
_`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -103,7 +103,7 @@ .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy +.. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -211,6 +211,9 @@ .. __: `recursion depth limit`_ +We also do not include any of the recent API additions to Stackless +Python, like ``set_atomic()``. Contributions welcome. + Recursion depth limit +++++++++++++++++++++ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -105,3 +105,25 @@ .. branch: stdlib-2.7.6 Update stdlib to v2.7.6 + +.. branch: virtual-raw-store-load +Support for virtualizing raw_store/raw_load operations + +.. branch: refactor-buffer-api +Separate the interp-level buffer API from the buffer type exposed to +app-level. The `Buffer` class is now used by `W_MemoryView` and +`W_Buffer`, which is not present in Python 3. Previously `W_Buffer` was +an alias to `Buffer`, which was wrappable itself. + +.. branch: improve-consecutive-dict-lookups +Improve the situation when dict lookups of the same key are performed in a chain + +.. branch: add_PyErr_SetFromErrnoWithFilenameObject_try_2 +.. branch: test_SetFromErrnoWithFilename_NULL +.. branch: test_SetFromErrnoWithFilename__tweaks + +.. branch: refactor_PyErr_SetFromErrnoWithFilename +Add support for PyErr_SetFromErrnoWithFilenameObject to cpyext + +.. 
branch: win32-fixes4 +fix more tests for win32 diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -194,6 +194,14 @@ def immutable_unique_id(self, space): return None + def buffer_w(self, space): + w_impl = space.lookup(self, '__buffer__') + if w_impl is not None: + w_result = space.get_and_call_function(w_impl, self) + if space.isinstance_w(w_result, space.w_buffer): + return w_result.buffer_w(space) + self._typed_unwrap_error(space, "buffer") + def str_w(self, space): self._typed_unwrap_error(space, "string") @@ -435,14 +443,12 @@ def getbuiltinmodule(self, name, force_init=False): w_name = self.wrap(name) w_modules = self.sys.get('modules') - try: - w_mod = self.getitem(w_modules, w_name) - except OperationError, e: - if not e.match(self, self.w_KeyError): - raise - else: - if not force_init: - return w_mod + if not force_init: + try: + return self.getitem(w_modules, w_name) + except OperationError, e: + if not e.match(self, self.w_KeyError): + raise # If the module is a builtin but not yet imported, # retrieve it and initialize it @@ -453,13 +459,13 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # Add the module to sys.modules - self.setitem(w_modules, w_name, w_mod) - - # And initialize it + # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): w_mod.init(self) + + # Add the module to sys.modules + self.setitem(w_modules, w_name, w_mod) return w_mod def get_builtinmodule_to_install(self): @@ -1316,10 +1322,7 @@ 'to unsigned int')) def buffer_w(self, w_obj): - # returns a Buffer instance - from pypy.interpreter.buffer import Buffer - w_buffer = self.buffer(w_obj) - return self.interp_w(Buffer, w_buffer) + return w_obj.buffer_w(self) def rwbuffer_w(self, w_obj): # returns a RWBuffer instance @@ -1679,7 +1682,6 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -1,32 +1,12 @@ """ Buffer protocol support. """ +from rpython.rlib.objectmodel import import_from_mixin -# The implementation of the buffer protocol. The basic idea is that we -# can ask any app-level object for a 'buffer' view on it, by calling its -# __buffer__() special method. It should return a wrapped instance of a -# subclass of the Buffer class defined below. Note that __buffer__() is -# a PyPy-only extension to the Python language, made necessary by the -# fact that it's not natural in PyPy to hack an interp-level-only -# interface. -# In normal usage, the convenience method space.buffer_w() should be -# used to get directly a Buffer instance. Doing so also gives you for -# free the typecheck that __buffer__() really returned a wrapped Buffer. 
- -import operator -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import compute_hash, import_from_mixin -from rpython.rlib.rstring import StringBuilder - - -class Buffer(W_Root): - """Abstract base class for memory views.""" - - __slots__ = () # no extra slot here +class Buffer(object): + """Abstract base class for buffers.""" + __slots__ = [] def getlength(self): raise NotImplementedError @@ -50,93 +30,10 @@ def is_writable(self): return False - # __________ app-level support __________ - - def descr_len(self, space): - return space.wrap(self.getlength()) - - def descr_getitem(self, space, w_index): - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - return space.wrap(self.getitem(start)) - res = self.getslice(start, stop, step, size) - return space.wrap(res) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - if not isinstance(self, RWBuffer): - raise OperationError(space.w_TypeError, - space.wrap("buffer is read-only")) - start, stop, step, size = space.decode_index4(w_index, self.getlength()) - if step == 0: # index only - if len(newstring) != 1: - msg = 'buffer[index]=x: x must be a single character' - raise OperationError(space.w_TypeError, space.wrap(msg)) - char = newstring[0] # annotator hint - self.setitem(start, char) - elif step == 1: - if len(newstring) != size: - msg = "right operand length must match slice length" - raise OperationError(space.w_ValueError, space.wrap(msg)) - self.setslice(start, newstring) - else: - raise OperationError(space.w_ValueError, - space.wrap("buffer object does not support" - " slicing with a step")) - - def descr__buffer__(self, space): - return space.wrap(self) - - def descr_str(self, space): - return space.wrap(self.as_str()) - - @unwrap_spec(other='bufferstr') - def descr_add(self, space, other): - return space.wrap(self.as_str() + other) - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if not isinstance(w_other, Buffer): - return space.w_NotImplemented - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def descr_hash(self, space): - return space.wrap(compute_hash(self.as_str())) - - def descr_mul(self, space, w_times): - # xxx not the most efficient implementation - w_string = space.wrap(self.as_str()) - # use the __mul__ method instead of space.mul() so that we - # return NotImplemented instead of raising a TypeError - return space.call_method(w_string, '__mul__', w_times) - - def descr_repr(self, space): - if isinstance(self, RWBuffer): - info = 'read-write buffer' - else: - info = 'read-only buffer' - addrstring = self.getaddrstring(space) - - return space.wrap("<%s for 0x%s, size %d>" % - (info, addrstring, self.getlength())) - class RWBuffer(Buffer): - """Abstract base class for read-write memory views.""" - - __slots__ = () # no extra slot here + """Abstract base class for read-write buffers.""" + __slots__ = [] def is_writable(self): return True @@ -151,76 
+48,8 @@ self.setitem(start + i, string[i]) - at unwrap_spec(offset=int, size=int) -def descr_buffer__new__(space, w_subtype, w_object, offset=0, size=-1): - # w_subtype can only be exactly 'buffer' for now - if not space.is_w(w_subtype, space.gettypefor(Buffer)): - raise OperationError(space.w_TypeError, - space.wrap("argument 1 must be 'buffer'")) - - if space.isinstance_w(w_object, space.w_unicode): - # unicode objects support the old buffer interface - # but not the new buffer interface (change in python 2.7) - from rpython.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE - unistr = space.unicode_w(w_object) - builder = StringBuilder(len(unistr) * UNICODE_SIZE) - for unich in unistr: - pack_unichar(unich, builder) - from pypy.interpreter.buffer import StringBuffer - w_buffer = space.wrap(StringBuffer(builder.build())) - else: - w_buffer = space.buffer(w_object) - - buffer = space.interp_w(Buffer, w_buffer) # type-check - if offset == 0 and size == -1: - return w_buffer - # handle buffer slices - if offset < 0: - raise OperationError(space.w_ValueError, - space.wrap("offset must be zero or positive")) - if size < -1: - raise OperationError(space.w_ValueError, - space.wrap("size must be zero or positive")) - if isinstance(buffer, RWBuffer): - buffer = RWSubBuffer(buffer, offset, size) - else: - buffer = SubBuffer(buffer, offset, size) - return space.wrap(buffer) - - -Buffer.typedef = TypeDef( - "buffer", - __doc__ = """\ -buffer(object [, offset[, size]]) - -Create a new buffer object which references the given object. -The buffer will reference a slice of the target object from the -start of the object (or at the specified offset). The slice will -extend to the end of the target object (or with the specified size). -""", - __new__ = interp2app(descr_buffer__new__), - __len__ = interp2app(Buffer.descr_len), - __getitem__ = interp2app(Buffer.descr_getitem), - __setitem__ = interp2app(Buffer.descr_setitem), - __buffer__ = interp2app(Buffer.descr__buffer__), - __str__ = interp2app(Buffer.descr_str), - __add__ = interp2app(Buffer.descr_add), - __eq__ = interp2app(Buffer.descr_eq), - __ne__ = interp2app(Buffer.descr_ne), - __lt__ = interp2app(Buffer.descr_lt), - __le__ = interp2app(Buffer.descr_le), - __gt__ = interp2app(Buffer.descr_gt), - __ge__ = interp2app(Buffer.descr_ge), - __hash__ = interp2app(Buffer.descr_hash), - __mul__ = interp2app(Buffer.descr_mul), - __rmul__ = interp2app(Buffer.descr_mul), - __repr__ = interp2app(Buffer.descr_repr), -) -Buffer.typedef.acceptable_as_base_class = False - -# ____________________________________________________________ - class StringBuffer(Buffer): + __slots__ = ['value'] def __init__(self, value): self.value = value @@ -241,43 +70,12 @@ assert 0 <= start <= stop return self.value[start:stop] return "".join([self.value[start + i*step] for i in xrange(size)]) - - -class StringLikeBuffer(Buffer): - """For app-level objects that already have a string-like interface - with __len__ and a __getitem__ that returns characters or (with - slicing) substrings.""" - # XXX this is inefficient, it should only be used temporarily - - def __init__(self, space, w_obj): - self.space = space - self.w_obj = w_obj - - def getlength(self): - space = self.space - return space.len_w(self.w_obj) - - def getitem(self, index): - space = self.space - s = space.str_w(space.getitem(self.w_obj, space.wrap(index))) - if len(s) != 1: - raise OperationError(space.w_ValueError, - space.wrap("character expected, got string")) - char = s[0] # annotator hint - return char - - def 
getslice(self, start, stop, step, size): - space = self.space - if step != 1: - raise OperationError(space.w_ValueError, space.wrap( - "buffer object does not support slicing with a step")) - s = space.str_w(space.getslice(self.w_obj, space.wrap(start), - space.wrap(stop))) - return s - # ____________________________________________________________ + class SubBufferMixin(object): + _attrs_ = ['buffer', 'offset', 'size'] + def __init__(self, buffer, offset, size): self.buffer = buffer self.offset = offset @@ -299,11 +97,14 @@ if start == stop: return '' # otherwise, adding self.offset might make them # out of bounds - return self.buffer.getslice(self.offset + start, self.offset + stop, step, size) + return self.buffer.getslice(self.offset + start, self.offset + stop, + step, size) + class SubBuffer(Buffer): import_from_mixin(SubBufferMixin) + class RWSubBuffer(RWBuffer): import_from_mixin(SubBufferMixin) diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,29 +1,25 @@ import py -from pypy.interpreter.buffer import Buffer from rpython.tool.udir import udir testdir = udir.ensure('test_buffer', dir=1) class TestBuffer: - def test_buffer_w(self): space = self.space w_hello = space.wrap('hello world') buf = space.buffer_w(w_hello) - assert isinstance(buf, Buffer) assert buf.getlength() == 11 assert buf.as_str() == 'hello world' assert buf.getslice(1, 6, 1, 5) == 'ello ' - assert space.buffer_w(space.wrap(buf)) is buf + assert space.buffer_w(space.newbuffer(buf)) is buf assert space.bufferstr_w(w_hello) == 'hello world' - assert space.bufferstr_w(space.buffer(w_hello)) == 'hello world' + assert space.bufferstr_w(space.newbuffer(space.buffer_w(w_hello))) == 'hello world' space.raises_w(space.w_TypeError, space.buffer_w, space.wrap(5)) - space.raises_w(space.w_TypeError, space.buffer, space.wrap(5)) def test_file_write(self): space = self.space - w_buffer = space.buffer(space.wrap('hello world')) + w_buffer = space.newbuffer(space.buffer_w(space.wrap('hello world'))) filename = str(testdir.join('test_file_write')) space.appexec([w_buffer, space.wrap(filename)], """(buffer, filename): f = open(filename, 'wb') @@ -44,4 +40,4 @@ space.bufferstr_w, space.wrap(u'\xe9')) -# Note: some app-level tests for buffer are in module/__builtin__/test/. +# Note: some app-level tests for buffer are in objspace/std/test/test_memoryview.py. diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -33,16 +33,11 @@ interpleveldefs = { # constants + '__debug__' : '(space.w_True)', # XXX 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', - '__debug__' : '(space.w_True)', # XXX - 'type' : '(space.w_type)', - 'object' : '(space.w_object)', 'bytes' : '(space.w_str)', - 'unicode' : '(space.w_unicode)', - 'buffer' : 'interp_memoryview.W_Buffer', - 'memoryview' : 'interp_memoryview.W_MemoryView', 'file' : 'state.get(space).w_file', 'open' : 'state.get(space).w_file', diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py deleted file mode 100644 --- a/pypy/module/__builtin__/interp_memoryview.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -Implementation of the 'buffer' and 'memoryview' types. 
-""" -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import buffer -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.error import OperationError -import operator - -W_Buffer = buffer.Buffer # actually implemented in pypy.interpreter.buffer - - -class W_MemoryView(W_Root): - """Implement the built-in 'memoryview' type as a thin wrapper around - an interp-level buffer. - """ - - def __init__(self, buf): - assert isinstance(buf, buffer.Buffer) - self.buf = buf - - def _make_descr__cmp(name): - def descr__cmp(self, space, w_other): - if isinstance(w_other, W_MemoryView): - # xxx not the most efficient implementation - str1 = self.as_str() - str2 = w_other.as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - - try: - w_buf = space.buffer(w_other) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - return space.w_NotImplemented - else: - str1 = self.as_str() - str2 = space.buffer_w(w_buf).as_str() - return space.wrap(getattr(operator, name)(str1, str2)) - descr__cmp.func_name = name - return descr__cmp - - descr_eq = _make_descr__cmp('eq') - descr_ne = _make_descr__cmp('ne') - descr_lt = _make_descr__cmp('lt') - descr_le = _make_descr__cmp('le') - descr_gt = _make_descr__cmp('gt') - descr_ge = _make_descr__cmp('ge') - - def as_str(self): - return self.buf.as_str() - - def getlength(self): - return self.buf.getlength() - - def getslice(self, start, stop): - if start < 0: - start = 0 - size = stop - start - if size < 0: - size = 0 - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf = buffer.RWSubBuffer(buf, start, size) - else: - buf = buffer.SubBuffer(buf, start, size) - return W_MemoryView(buf) - - def descr_buffer(self, space): - """ - Note that memoryview() is very inconsistent in CPython: it does not - support the buffer interface but does support the new buffer - interface: as a result, it is possible to pass memoryview to - e.g. socket.send() but not to file.write(). For simplicity and - consistency, in PyPy memoryview DOES support buffer(), which means - that it is accepted in more places than CPython. 
- """ - return space.wrap(self.buf) - - def descr_tobytes(self, space): - return space.wrap(self.as_str()) - - def descr_tolist(self, space): - buf = self.buf - result = [] - for i in range(buf.getlength()): - result.append(space.wrap(ord(buf.getitem(i)))) - return space.newlist(result) - - def descr_getitem(self, space, w_index): - start, stop, step = space.decode_index(w_index, self.getlength()) - if step == 0: # index only - return space.wrap(self.buf.getitem(start)) - elif step == 1: - res = self.getslice(start, stop) - return space.wrap(res) - else: - raise OperationError(space.w_ValueError, - space.wrap("memoryview object does not support" - " slicing with a step")) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - buf = self.buf - if isinstance(buf, buffer.RWBuffer): - buf.descr_setitem(space, w_index, newstring) - else: - raise OperationError(space.w_TypeError, - space.wrap("cannot modify read-only memory")) - - def descr_len(self, space): - return self.buf.descr_len(space) - - def w_get_format(self, space): - return space.wrap("B") - - def w_get_itemsize(self, space): - return space.wrap(1) - - def w_get_ndim(self, space): - return space.wrap(1) - - def w_is_readonly(self, space): - return space.wrap(not isinstance(self.buf, buffer.RWBuffer)) - - def w_get_shape(self, space): - return space.newtuple([space.wrap(self.getlength())]) - - def w_get_strides(self, space): - return space.newtuple([space.wrap(1)]) - - def w_get_suboffsets(self, space): - # I've never seen anyone filling this field - return space.w_None - - -def descr_new(space, w_subtype, w_object): - memoryview = W_MemoryView(space.buffer(w_object)) - return space.wrap(memoryview) - -W_MemoryView.typedef = TypeDef( - "memoryview", - __doc__ = """\ -Create a new memoryview object which references the given object. 
-""", - __new__ = interp2app(descr_new), - __buffer__ = interp2app(W_MemoryView.descr_buffer), - __eq__ = interp2app(W_MemoryView.descr_eq), - __ge__ = interp2app(W_MemoryView.descr_ge), - __getitem__ = interp2app(W_MemoryView.descr_getitem), - __gt__ = interp2app(W_MemoryView.descr_gt), - __le__ = interp2app(W_MemoryView.descr_le), - __len__ = interp2app(W_MemoryView.descr_len), - __lt__ = interp2app(W_MemoryView.descr_lt), - __ne__ = interp2app(W_MemoryView.descr_ne), - __setitem__ = interp2app(W_MemoryView.descr_setitem), - tobytes = interp2app(W_MemoryView.descr_tobytes), - tolist = interp2app(W_MemoryView.descr_tolist), - format = GetSetProperty(W_MemoryView.w_get_format), - itemsize = GetSetProperty(W_MemoryView.w_get_itemsize), - ndim = GetSetProperty(W_MemoryView.w_get_ndim), - readonly = GetSetProperty(W_MemoryView.w_is_readonly), - shape = GetSetProperty(W_MemoryView.w_get_shape), - strides = GetSetProperty(W_MemoryView.w_get_strides), - suboffsets = GetSetProperty(W_MemoryView.w_get_suboffsets), - ) -W_MemoryView.typedef.acceptable_as_base_class = False diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -24,6 +24,17 @@ else: cls.w_safe_runtimerror = cls.space.wrap(sys.version_info < (2, 6)) + def test_builtin_names(self): + import __builtin__ + assert __builtin__.None is None + assert __builtin__.False is False + assert __builtin__.True is True + + assert __builtin__.buffer is buffer + assert __builtin__.bytes is str + assert __builtin__.dict is dict + assert __builtin__.memoryview is memoryview + def test_bytes_alias(self): assert bytes is str assert isinstance(eval("b'hi'"), str) @@ -489,24 +500,24 @@ def test_compile_error_message(self): import re compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - compile('\xef\xbb\xbf\n', 'dummy', 'exec') - compile('\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec') + compile(b'\xef\xbb\xbf\n', 'dummy', 'exec') + compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec') exc = raises(SyntaxError, compile, - '# -*- coding: fake -*-\n', 'dummy', 'exec') - assert 'fake' in exc.value[0] + b'# -*- coding: fake -*-\n', 'dummy', 'exec') + assert 'fake' in str(exc.value) exc = raises(SyntaxError, compile, - '\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') - assert 'iso-8859-15' in exc.value[0] - assert 'BOM' in exc.value[0] + b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec') + assert 'iso-8859-15' in str(exc.value) + assert 'BOM' in str(exc.value) exc = raises(SyntaxError, compile, - '\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') - assert 'fake' in exc.value[0] - assert 'BOM' in exc.value[0] + b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec') + assert 'fake' in str(exc.value) + assert 'BOM' in str(exc.value) def test_unicode_compile(self): try: compile(u'-', '?', 'eval') - except SyntaxError, e: + except SyntaxError as e: assert e.lineno == 1 def test_unicode_encoding_compile(self): diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -7,7 +7,6 @@ class ByteBuffer(RWBuffer): - def __init__(self, len): self.data = ['\x00'] * len @@ -23,4 +22,4 @@ @unwrap_spec(length=int) def bytebuffer(space, length): - return space.wrap(ByteBuffer(length)) + return space.newbuffer(ByteBuffer(length)) diff --git 
a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -4,7 +4,6 @@ from pypy.interpreter.typedef import TypeDef from rpython.rlib.rstring import UnicodeBuilder, StringBuilder from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib import jit def create_builder(name, strtype, builder_cls): diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,9 +1,9 @@ -from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from pypy.objspace.std.memoryview import W_Buffer from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi @@ -39,38 +39,19 @@ copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) -class MiniBuffer(W_Root): - # a different subclass of W_Root for the MiniBuffer, because we - # want a slightly different (simplified) API at the level of Python. +# Override the typedef to narrow down the interface that's exposed to app-level +class MiniBuffer(W_Buffer): def __init__(self, buffer, keepalive=None): - self.buffer = buffer + W_Buffer.__init__(self, buffer) self.keepalive = keepalive - def descr_len(self, space): - return self.buffer.descr_len(space) - - def descr_getitem(self, space, w_index): - return self.buffer.descr_getitem(space, w_index) - - @unwrap_spec(newstring='bufferstr') - def descr_setitem(self, space, w_index, newstring): - self.buffer.descr_setitem(space, w_index, newstring) - - def descr__buffer__(self, space): - return self.buffer.descr__buffer__(space) - - def descr_str(self, space): - return space.wrap(self.buffer.as_str()) - - MiniBuffer.typedef = TypeDef( "buffer", __module__ = "_cffi_backend", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), - __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), __str__ = interp2app(MiniBuffer.descr_str), ) diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -80,7 +80,6 @@ return (cfield.ctype, cfield.offset) def _copy_from_same(self, cdata, w_ob): - space = self.space if isinstance(w_ob, cdataobj.W_CData): if w_ob.ctype is self and self.size >= 0: misc._raw_memcopy(w_ob._cdata, cdata, self.size) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,4 +1,3 @@ -import weakref from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import keepalive_until_here, specialize -from rpython.rlib.rarithmetic import r_uint, 
r_ulonglong, is_signed_integer_type +from rpython.rlib.rarithmetic import r_uint, r_ulonglong from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -1,7 +1,7 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rstring import UnicodeBuilder -from rpython.rlib.runicode import UNICHR, MAXUNICODE +from rpython.rlib.runicode import code_to_unichr, MAXUNICODE from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault @@ -541,7 +541,7 @@ if not 0 <= x <= 0x10FFFF: raise oefmt(space.w_TypeError, "character mapping must be in range(0x110000)") - return UNICHR(x) + return code_to_unichr(x) elif space.is_w(w_ch, space.w_None): # Charmap may return None return errorchar diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -1,3 +1,5 @@ +import sys + class AppTestCodecs: spaceconfig = { "usemodules": ['unicodedata', 'struct', 'binascii'], @@ -109,7 +111,7 @@ assert charmap_decode('xxx\xff', 'strict', map) == (u'xxx\xff', 4) exc = raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 'a'}) - assert exc.value[0] == "character mapping must return integer, None or unicode" + assert str(exc.value) == "character mapping must return integer, None or unicode" raises(TypeError, charmap_decode, '\xff', "strict", {0xff: 0x110000}) assert (charmap_decode("\x00\x01\x02", "strict", {0: 0x10FFFF, 1: ord('b'), 2: ord('c')}) == @@ -137,7 +139,9 @@ class AppTestPartialEvaluation: - spaceconfig = dict(usemodules=('array',)) + spaceconfig = dict(usemodules=['array',]) + if sys.platform == 'win32': + spaceconfig['usemodules'].append('_winreg') def test_partial_utf8(self): import _codecs @@ -330,12 +334,12 @@ raises(UnicodeDecodeError, decode, r"\U00110000") assert decode(r"\U00110000", "ignore") == (u"", 10) assert decode(r"\U00110000", "replace") == (u"\ufffd", 10) - exc = raises(UnicodeDecodeError, unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\u1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" - exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, "\U1z32z3", 'strict') - assert str(exc.value) == "'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'unicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX escape" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\u1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" + exc = raises(UnicodeDecodeError, raw_unicode_escape_decode, b"\U1z32z3", 'strict') + assert str(exc.value) == r"'rawunicodeescape' codec can't decode bytes in position 0-2: truncated \uXXXX" def test_escape_encode(self): assert '"'.encode('string_escape') 
== '"' @@ -596,7 +600,7 @@ l = [u"<%d>" % ord(exc.object[pos]) for pos in xrange(exc.start, exc.end)] return (u"[%s]" % u"".join(l), exc.end) codecs.register_error("test.handler1", handler1) - assert "\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ + assert b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1") == \ u"\u3042[<92><117><51>]xxx" def test_encode_error_bad_handler(self): @@ -615,9 +619,9 @@ import codecs exc = raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: 300}) - assert exc.value[0] == 'character mapping must be in range(256)' + assert str(exc.value) == 'character mapping must be in range(256)' exc = raises(TypeError, codecs.charmap_encode, u'\xff', "replace", {0xff: u'a'}) - assert exc.value[0] == 'character mapping must return integer, None or str' + assert str(exc.value) == 'character mapping must return integer, None or str' raises(UnicodeError, codecs.charmap_encode, u"\xff", "replace", {0xff: None}) def test_charmap_encode_replace(self): @@ -649,22 +653,22 @@ def test_utf7_errors(self): import codecs tests = [ - ('a\xffb', u'a\ufffdb'), - ('a+IK', u'a\ufffd'), - ('a+IK-b', u'a\ufffdb'), - ('a+IK,b', u'a\ufffdb'), - ('a+IKx', u'a\u20ac\ufffd'), - ('a+IKx-b', u'a\u20ac\ufffdb'), - ('a+IKwgr', u'a\u20ac\ufffd'), - ('a+IKwgr-b', u'a\u20ac\ufffdb'), - ('a+IKwgr,', u'a\u20ac\ufffd'), - ('a+IKwgr,-b', u'a\u20ac\ufffd-b'), - ('a+IKwgrB', u'a\u20ac\u20ac\ufffd'), - ('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), - ('a+/,+IKw-b', u'a\ufffd\u20acb'), - ('a+//,+IKw-b', u'a\ufffd\u20acb'), - ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), - ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a\xffb', u'a\ufffdb'), + (b'a+IK', u'a\ufffd'), + (b'a+IK-b', u'a\ufffdb'), + (b'a+IK,b', u'a\ufffdb'), + (b'a+IKx', u'a\u20ac\ufffd'), + (b'a+IKx-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr', u'a\u20ac\ufffd'), + (b'a+IKwgr-b', u'a\u20ac\ufffdb'), + (b'a+IKwgr,', u'a\u20ac\ufffd'), + (b'a+IKwgr,-b', u'a\u20ac\ufffd-b'), + (b'a+IKwgrB', u'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'), + (b'a+/,+IKw-b', u'a\ufffd\u20acb'), + (b'a+//,+IKw-b', u'a\ufffd\u20acb'), + (b'a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), ] for raw, expected in tests: raises(UnicodeDecodeError, codecs.utf_7_decode, raw, 'strict', True) @@ -694,9 +698,19 @@ import sys if sys.platform != 'win32': return + toencode = u'caf\xe9', 'caf\xe9' + try: + # test for non-latin1 codepage, more general test needed + import _winreg + key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + r'System\CurrentControlSet\Control\Nls\CodePage') + if _winreg.QueryValueEx(key, 'ACP')[0] == u'1255': # non-latin1 + toencode = u'caf\xbf','caf\xbf' + except: + assert False, 'cannot test mbcs on this windows system, check code page' assert u'test'.encode('mbcs') == 'test' - assert u'caf\xe9'.encode('mbcs') == 'caf\xe9' - assert u'\u040a'.encode('mbcs') == '?' # some cyrillic letter + assert toencode[0].encode('mbcs') == toencode[1] + assert u'\u040a'.encode('mbcs') == '?' 
# some cyrillic letter assert 'cafx\e9'.decode('mbcs') == u'cafx\e9' def test_bad_handler_string_result(self): diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -1,7 +1,6 @@ - # Package initialisation from pypy.interpreter.mixedmodule import MixedModule -import sys + class Module(MixedModule): appleveldefs = { diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -254,6 +254,13 @@ if '__pypy__' in sys.builtin_module_names: assert repr(self.temppath) in g.getvalue() + def test_truncate(self): + f = self.file(self.temppath, "w") + f.write("foo") + f.close() + with self.file(self.temppath, 'r') as f: + raises(IOError, f.truncate, 100) + class AppTestNonblocking(object): def setup_class(cls): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -200,6 +200,10 @@ assert f.closed == True def test_repr(self): + import sys + if '__pypy__' not in sys.builtin_module_names and \ + sys.version_info < (2, 7, 4): + skip("see cpython issue14161") assert repr(self.file).startswith( " 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) a = addr.lock(_c.sockaddr_in6) rffi.setintfield(a, 'c_sin6_port', rsocket.htons(port)) rffi.setintfield(a, 'c_sin6_flowinfo', rsocket.htonl(flowinfo)) @@ -97,10 +94,7 @@ else: flowinfo = 0 if len(pieces_w) > 3: scope_id = space.uint_w(pieces_w[3]) else: scope_id = 0 - if flowinfo < 0 or flowinfo > 0xfffff: - raise OperationError(space.w_OverflowError, space.wrap( - "flowinfo must be 0-1048575.")) - flowinfo = rffi.cast(lltype.Unsigned, flowinfo) + flowinfo = make_unsigned_flowinfo(space, flowinfo) return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: return rsocket.UNIXAddress(space.str_w(w_address)) @@ -112,10 +106,16 @@ # XXX Hack to seperate rpython and pypy def make_ushort_port(space, port): if port < 0 or port > 0xffff: - raise OperationError(space.w_ValueError, space.wrap( + raise OperationError(space.w_OverflowError, space.wrap( "port must be 0-65535.")) return rffi.cast(rffi.USHORT, port) +def make_unsigned_flowinfo(space, flowinfo): + if flowinfo < 0 or flowinfo > 0xfffff: + raise OperationError(space.w_OverflowError, space.wrap( + "flowinfo must be 0-1048575.")) + return rffi.cast(lltype.Unsigned, flowinfo) + # XXX Hack to seperate rpython and pypy def ipaddr_from_object(space, w_sockaddr): host = space.str_w(space.getitem(w_sockaddr, space.wrap(0))) @@ -536,13 +536,9 @@ @unwrap_spec(family=int, type=int, proto=int) def newsocket(space, w_subtype, family=AF_INET, type=SOCK_STREAM, proto=0): - # XXX If we want to support subclassing the socket type we will need - # something along these lines. But allocate_instance is only defined - # on the standard object space, so this is not really correct. 
- #sock = space.allocate_instance(W_RSocket, w_subtype) - #Socket.__init__(sock, space, fd, family, type, proto) + sock = space.allocate_instance(W_RSocket, w_subtype) try: - sock = W_RSocket(family, type, proto) + W_RSocket.__init__(sock, family, type, proto) except SocketError, e: raise converted_error(space, e) return space.wrap(sock) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -399,7 +399,7 @@ name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 s.close() - + def test_socket_connect_ex(self): import _socket s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) @@ -425,8 +425,13 @@ def test_bigport(self): import _socket s = _socket.socket() - raises(ValueError, s.connect, ("localhost", 1000000)) - raises(ValueError, s.connect, ("localhost", -1)) + exc = raises(OverflowError, s.connect, ("localhost", -1)) + assert "port must be 0-65535." in str(exc.value) + exc = raises(OverflowError, s.connect, ("localhost", 1000000)) + assert "port must be 0-65535." in str(exc.value) + s = _socket.socket(_socket.AF_INET6) + exc = raises(OverflowError, s.connect, ("::1", 1234, 1048576)) + assert "flowinfo must be 0-1048575." in str(exc.value) def test_NtoH(self): import sys @@ -474,6 +479,13 @@ import socket s = socket.socket() + def test_subclass(self): + from _socket import socket + class MySock(socket): + blah = 123 + s = MySock() + assert s.blah == 123 + def test_getsetsockopt(self): import _socket as socket import struct @@ -575,11 +587,11 @@ class AppTestSocketTCP: + HOST = 'localhost' + def setup_class(cls): cls.space = space - HOST = 'localhost' - def setup_method(self, method): w_HOST = space.wrap(self.HOST) self.w_serv = space.appexec([w_socket, w_HOST], @@ -589,6 +601,7 @@ serv.listen(1) return serv ''') + def teardown_method(self, method): if hasattr(self, 'w_serv'): space.appexec([self.w_serv], '(serv): serv.close()') @@ -609,7 +622,7 @@ raises(error, raise_error) def test_recv_send_timeout(self): - from _socket import socket, timeout + from _socket import socket, timeout, SOL_SOCKET, SO_RCVBUF, SO_SNDBUF cli = socket() cli.connect(self.serv.getsockname()) t, addr = self.serv.accept() @@ -629,6 +642,9 @@ assert count is None buf = t.recv(1) assert buf == '?' 
+ # speed up filling the buffers + t.setsockopt(SOL_SOCKET, SO_RCVBUF, 4096) + cli.setsockopt(SOL_SOCKET, SO_SNDBUF, 4096) # test send() timeout count = 0 try: @@ -656,7 +672,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes = cli.recv_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -671,7 +687,7 @@ conn, addr = self.serv.accept() buf = buffer(MSG) conn.send(buf) - buf = array.array('c', ' '*1024) + buf = array.array('c', ' ' * 1024) nbytes, addr = cli.recvfrom_into(buf) assert nbytes == len(MSG) msg = buf.tostring()[:len(MSG)] @@ -682,6 +698,7 @@ cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) assert cli.family == socket.AF_INET + class AppTestErrno: def setup_class(cls): cls.space = space diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -35,7 +35,7 @@ SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3 SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5 -HAVE_RPOLL = True # Even win32 has rpoll.poll +HAVE_RPOLL = 'poll' in dir(rpoll) constants = {} constants["SSL_ERROR_ZERO_RETURN"] = PY_SSL_ERROR_ZERO_RETURN diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -1,5 +1,5 @@ class AppTestSSL: - spaceconfig = dict(usemodules=('_ssl', '_socket')) + spaceconfig = dict(usemodules=('_ssl', '_socket', 'thread')) def setup_class(cls): import os diff --git a/pypy/module/_ssl/thread_lock.py b/pypy/module/_ssl/thread_lock.py --- a/pypy/module/_ssl/thread_lock.py +++ b/pypy/module/_ssl/thread_lock.py @@ -1,4 +1,5 @@ -from rpython.rlib.ropenssl import * +from rpython.rlib import rthread +from rpython.rlib.ropenssl import libraries from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -22,7 +23,6 @@ # without caring about the GIL. 
separate_module_source = """ - #include static unsigned int _ssl_locks_count = 0; @@ -62,13 +62,12 @@ } """ -from rpython.rlib import rthread - eci = rthread.eci.merge(ExternalCompilationInfo( separate_module_sources=[separate_module_source], post_include_bits=[ "int _PyPy_SSL_SetupThreads(void);"], export_symbols=['_PyPy_SSL_SetupThreads'], + libraries = libraries, )) _PyPy_SSL_SetupThreads = rffi.llexternal('_PyPy_SSL_SetupThreads', diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -132,6 +132,9 @@ self.len = 0 self.allocated = 0 + def buffer_w(self, space): + return ArrayBuffer(self) + def descr_append(self, space, w_x): """ append(x) @@ -462,9 +465,6 @@ # Misc methods - def descr_buffer(self, space): - return space.wrap(ArrayBuffer(self)) - def descr_repr(self, space): if self.len == 0: return space.wrap("array('%s')" % self.typecode) @@ -508,7 +508,6 @@ __radd__ = interp2app(W_ArrayBase.descr_radd), __rmul__ = interp2app(W_ArrayBase.descr_rmul), - __buffer__ = interp2app(W_ArrayBase.descr_buffer), __repr__ = interp2app(W_ArrayBase.descr_repr), itemsize = GetSetProperty(descr_itemsize), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1,25 +1,8 @@ import sys -import py -import py.test - - -## class AppTestSimpleArray: -## spaceconfig = dict(usemodules=('array',)) -## def setup_class(cls): -## cls.w_simple_array = cls.space.appexec([], """(): -## import array -## return array.simple_array -## """) - -## def test_simple(self): -## a = self.simple_array(10) -## a[5] = 7.42 -## assert a[5] == 7.42 +import pytest class BaseArrayTests: - - def test_ctor(self): assert len(self.array('c')) == 0 assert len(self.array('i')) == 0 @@ -563,7 +546,6 @@ assert not a > 2*a assert not a >= 2*a - def test_reduce(self): import pickle a = self.array('i', [1, 2, 3]) @@ -794,7 +776,6 @@ assert img[3, 25] == 3 * 9 - def test_override_from(self): class mya(self.array): def fromlist(self, lst): @@ -879,41 +860,41 @@ def test_assign_object_with_special_methods(self): from array import array - + class Num(object): def __float__(self): return 5.25 - + def __int__(self): return 7 - + class NotNum(object): pass - + class Silly(object): def __float__(self): return None - + def __int__(self): - return None + return None class OldNum: def __float__(self): return 6.25 - + def __int__(self): return 8 - + class OldNotNum: pass - + class OldSilly: def __float__(self): return None - + def __int__(self): return None - + for tc in 'bBhHiIlL': a = array(tc, [0]) raises(TypeError, a.__setitem__, 0, 1.0) @@ -931,7 +912,7 @@ a = array(tc, [0]) a[0] = 1.0 a[0] = 1 - a[0] = Num() + a[0] = Num() assert a[0] == 5.25 raises(TypeError, a.__setitem__, NotNum()) a[0] = OldNum() @@ -939,24 +920,23 @@ raises(TypeError, a.__setitem__, OldNotNum()) raises(TypeError, a.__setitem__, Silly()) raises(TypeError, a.__setitem__, OldSilly()) - + a = array('c', 'hi') a[0] = 'b' assert a[0] == 'b' - + a = array('u', u'hi') a[0] = u'b' From noreply at buildbot.pypy.org Mon Mar 31 22:43:19 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 22:43:19 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: fix test for 32 bit Message-ID: <20140331204319.D23941D29F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70361:8232f40c534a Date: 2014-03-31 23:02 
+0300 http://bitbucket.org/pypy/pypy/changeset/8232f40c534a/ Log: fix test for 32 bit diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -265,10 +265,10 @@ assert (it.operands[1] == a.sum(axis=2)).all() def test_get_dtypes(self): - from numpy import array, dtype, nditer + from numpy import array, nditer x = array([1, 2]) y = array([1.0, 2.0]) - assert nditer([x, y]).dtypes == (dtype("int64"), dtype("float64")) + assert nditer([x, y]).dtypes == (x.dtype, y.dtype) def test_multi_index(self): import numpy as np From noreply at buildbot.pypy.org Mon Mar 31 22:43:21 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 31 Mar 2014 22:43:21 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: start fixing op_axes handling Message-ID: <20140331204321.216181D29F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r70362:9d764d733c7a Date: 2014-03-31 23:42 +0300 http://bitbucket.org/pypy/pypy/changeset/9d764d733c7a/ Log: start fixing op_axes handling diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -277,6 +277,7 @@ self.index_iter = None self.done = False self.first_next = True + self.op_axes = [] if space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) @@ -333,15 +334,21 @@ op_axes = space.listview(w_op_axes) l = -1 for w_axis in op_axes: - if not space.is_(w_axis, space.w_None): + if not space.is_none(w_axis): axis_len = space.len_w(w_axis) if l == -1: l = axis_len elif axis_len != l: raise OperationError(space.w_ValueError, space.wrap("Each entry of op_axes must have the same size")) - self.op_axes.append([space.int_w(x) if not space.is_(x, space.w_None) else space.w_None for x in space.listview(w_axis)]) + self.op_axes.append([space.int_w(x) if not space.is_none(x) else space.w_None for x in space.listview(w_axis)]) if l == -1: raise OperationError(space.w_ValueError, space.wrap("If op_axes is provided, at least one list of axes must be contained within it")) + raise Exception('xxx TODO') + # Check that values make sense: + # - in bounds for each operand + # ValueError: Iterator input op_axes[0][3] (==3) is not a valid axis of op[0], which has 2 dimensions + # - no repeat axis + # ValueError: The 'op_axes' provided to the iterator constructor for operand 1 contained duplicate value 0 def descr_iter(self, space): return space.wrap(self) From noreply at buildbot.pypy.org Mon Mar 31 23:26:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 23:26:39 +0200 (CEST) Subject: [pypy-commit] stmgc default: Extra asserts Message-ID: <20140331212639.78E6F1C0161@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1127:ba29f5ab1dcd Date: 2014-03-31 23:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/ba29f5ab1dcd/ Log: Extra asserts diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -683,6 +683,10 @@ STM_SEGMENT->jmpbuf_ptr = NULL; clear_callbacks_on_abort(); } + else { + assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); + assert(STM_SEGMENT->jmpbuf_ptr == NULL); + } s_mutex_unlock(); } From noreply at buildbot.pypy.org Mon Mar 31 23:31:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 31 Mar 2014 23:31:47 +0200 (CEST) Subject: [pypy-commit] pypy 
stmgc-c7: Debugging tweaks, and the one issue it found, which was Message-ID: <20140331213147.5C82C1C0161@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r70363:901b34818a18 Date: 2014-03-31 23:24 +0200 http://bitbucket.org/pypy/pypy/changeset/901b34818a18/ Log: Debugging tweaks, and the one issue it found, which was stm_become_globally_unique_transaction() not resetting pypy_stm_nursery_low_fill_mark to 0 diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -134,8 +134,7 @@ return 'pypy_stm_become_inevitable(%s);' % (string_literal,) def stm_become_globally_unique_transaction(funcgen, op): - return ('stm_become_globally_unique_transaction(&stm_thread_local,' - ' "for the JIT");') + return 'pypy_stm_become_globally_unique_transaction();' def stm_push_root(funcgen, op): arg0 = funcgen.expr(op.args[0]) diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -94,6 +94,7 @@ void pypy_stm_start_transaction(stm_jmpbuf_t *jmpbuf_ptr, volatile long *v_counter) { + pypy_stm_nursery_low_fill_mark = 1; /* will be set to a correct value below */ _stm_start_transaction(&stm_thread_local, jmpbuf_ptr); /* If v_counter==0, initialize 'pypy_stm_nursery_low_fill_mark' @@ -181,13 +182,13 @@ assert(pypy_stm_nursery_low_fill_mark != 0); assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); stm_commit_transaction(); + pypy_stm_nursery_low_fill_mark = 0; stm_start_inevitable_transaction(&stm_thread_local); - pypy_stm_nursery_low_fill_mark = 0; } else { - _stm_become_inevitable("perform_transaction left with atomic"); assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); pypy_stm_nursery_low_fill_mark_saved = 0; + _stm_become_inevitable("perform_transaction left with atomic"); } } /* double-check */ @@ -205,13 +206,8 @@ assert(v_old_shadowstack == stm_thread_local.shadowstack); } -void _pypy_stm_become_inevitable(const char *msg) +static void _pypy_stm_inev_state(void) { - if (msg == NULL) { - msg = "return from JITted function"; - } - _stm_become_inevitable(msg); - if (pypy_stm_ready_atomic == 1) { pypy_stm_nursery_low_fill_mark = 0; } @@ -220,3 +216,18 @@ pypy_stm_nursery_low_fill_mark_saved = 0; } } + +void _pypy_stm_become_inevitable(const char *msg) +{ + _pypy_stm_inev_state(); + if (msg == NULL) { + msg = "return from JITted function"; + } + _stm_become_inevitable(msg); +} + +void pypy_stm_become_globally_unique_transaction(void) +{ + _pypy_stm_inev_state(); + stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT"); +} diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -24,6 +24,7 @@ void pypy_stm_unregister_thread_local(void); /* generated into stm_prebuilt.c */ void _pypy_stm_become_inevitable(const char *); +void pypy_stm_become_globally_unique_transaction(void); static inline void pypy_stm_become_inevitable(const char *msg) @@ -46,8 +47,8 @@ static inline void pypy_stm_start_inevitable_if_not_atomic(void) { if (pypy_stm_ready_atomic == 1) { int e = errno; + pypy_stm_nursery_low_fill_mark = 0; stm_start_inevitable_transaction(&stm_thread_local); - pypy_stm_nursery_low_fill_mark = 0; errno = e; } }